diff --git a/crates/core_arch/src/lib.rs b/crates/core_arch/src/lib.rs
index a059a9c225..5bedefe42d 100644
--- a/crates/core_arch/src/lib.rs
+++ b/crates/core_arch/src/lib.rs
@@ -34,7 +34,10 @@
aarch64_unstable_target_feature,
bigint_helper_methods,
funnel_shifts,
- avx10_target_feature
+ avx10_target_feature,
+ const_trait_impl,
+ const_cmp,
+ const_eval_select
)]
#![cfg_attr(test, feature(test, abi_vectorcall, stdarch_internal))]
#![deny(clippy::missing_inline_in_public_items)]
diff --git a/crates/core_arch/src/mod.rs b/crates/core_arch/src/mod.rs
index 2105cca1b4..7fc0ed0fca 100644
--- a/crates/core_arch/src/mod.rs
+++ b/crates/core_arch/src/mod.rs
@@ -5,6 +5,11 @@
#[macro_use]
mod macros;
+#[cfg(test)]
+mod test;
+#[cfg(test)]
+use test::assert_eq_const;
+
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64", doc))]
mod riscv_shared;
diff --git a/crates/core_arch/src/simd.rs b/crates/core_arch/src/simd.rs
index 9fa8e25022..b9dbde7597 100644
--- a/crates/core_arch/src/simd.rs
+++ b/crates/core_arch/src/simd.rs
@@ -3,13 +3,15 @@
#![allow(non_camel_case_types)]
#[inline(always)]
-pub(crate) unsafe fn simd_imax<T: Copy>(a: T, b: T) -> T {
+#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")]
+pub(crate) const unsafe fn simd_imax<T: Copy>(a: T, b: T) -> T {
let mask: T = crate::intrinsics::simd::simd_gt(a, b);
crate::intrinsics::simd::simd_select(mask, a, b)
}
#[inline(always)]
-pub(crate) unsafe fn simd_imin<T: Copy>(a: T, b: T) -> T {
+#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")]
+pub(crate) const unsafe fn simd_imin<T: Copy>(a: T, b: T) -> T {
let mask: T = crate::intrinsics::simd::simd_lt(a, b);
crate::intrinsics::simd::simd_select(mask, a, b)
}
@@ -35,7 +37,8 @@ macro_rules! simd_ty {
}
// FIXME: Workaround rust@60637
#[inline(always)]
- pub(crate) fn splat(value: $elem_type) -> Self {
+ #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")]
+ pub(crate) const fn splat(value: $elem_type) -> Self {
#[derive(Copy, Clone)]
#[repr(simd)]
struct JustOne([$elem_type; 1]);
@@ -50,12 +53,12 @@ macro_rules! simd_ty {
/// Use for testing only.
// FIXME: Workaround rust@60637
#[inline(always)]
- pub(crate) fn extract(&self, index: usize) -> $elem_type {
+ pub(crate) const fn extract(&self, index: usize) -> $elem_type {
self.as_array()[index]
}
#[inline]
- pub(crate) fn as_array(&self) -> &[$elem_type; $len] {
+ pub(crate) const fn as_array(&self) -> &[$elem_type; $len] {
let simd_ptr: *const Self = self;
let array_ptr: *const [$elem_type; $len] = simd_ptr.cast();
// SAFETY: We can always read the prefix of a simd type as an array.
@@ -65,7 +68,8 @@ macro_rules! simd_ty {
}
}
- impl core::cmp::PartialEq for $id {
+ #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")]
+ impl const core::cmp::PartialEq for $id {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.as_array() == other.as_array()
}
@@ -101,7 +105,8 @@ macro_rules! simd_m_ty {
// FIXME: Workaround rust@60637
#[inline(always)]
- pub(crate) fn splat(value: bool) -> Self {
+ #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")]
+ pub(crate) const fn splat(value: bool) -> Self {
#[derive(Copy, Clone)]
#[repr(simd)]
struct JustOne([$elem_type; 1]);
@@ -112,7 +117,7 @@ macro_rules!
simd_m_ty {
}
#[inline]
- pub(crate) fn as_array(&self) -> &[$elem_type; $len] {
+ pub(crate) const fn as_array(&self) -> &[$elem_type; $len] {
let simd_ptr: *const Self = self;
let array_ptr: *const [$elem_type; $len] = simd_ptr.cast();
// SAFETY: We can always read the prefix of a simd type as an array.
@@ -122,7 +127,8 @@ macro_rules! simd_m_ty {
}
}
- impl core::cmp::PartialEq for $id {
+ #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")]
+ impl const core::cmp::PartialEq for $id {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.as_array() == other.as_array()
diff --git a/crates/core_arch/src/test.rs b/crates/core_arch/src/test.rs
new file mode 100644
index 0000000000..976d4ac1b0
--- /dev/null
+++ b/crates/core_arch/src/test.rs
@@ -0,0 +1,25 @@
+use crate::fmt::Debug;
+
+#[track_caller]
+#[allow(unused)]
+pub(crate) fn assert_eq_rt<T: PartialEq + Debug>(a: &T, b: &T) {
+ std::assert_eq!(a, b)
+}
+
+#[allow(unused)]
+macro_rules! assert_eq_const {
+ ($a:expr, $b:expr $(,)?) => {{
+ #[inline(always)]
+ #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")]
+ const fn assert_eq_ct<T: [const] PartialEq>(a: &T, b: &T) {
+ assert!(a == b, concat!("`", stringify!($a), "` != `", stringify!($b), "`"));
+ }
+
+ $crate::intrinsics::const_eval_select((&$a, &$b), assert_eq_ct, $crate::core_arch::test::assert_eq_rt);
+ }};
+ ($a:expr, $b:expr, $($t:tt)+) => {
+ ::std::assert_eq!($a, $b, $($t)+)
+ };
+}
+
+pub(crate) use assert_eq_const;
diff --git a/crates/core_arch/src/x86/abm.rs b/crates/core_arch/src/x86/abm.rs
index e6d5517600..59f3877b4a 100644
--- a/crates/core_arch/src/x86/abm.rs
+++ b/crates/core_arch/src/x86/abm.rs
@@ -29,7 +29,8 @@ use stdarch_test::assert_instr;
#[target_feature(enable = "lzcnt")]
#[cfg_attr(test, assert_instr(lzcnt))]
#[stable(feature = "simd_x86", since = "1.27.0")]
-pub fn _lzcnt_u32(x: u32) -> u32 {
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
+pub const fn _lzcnt_u32(x: u32) -> u32 {
x.leading_zeros()
}
@@ -40,23 +41,25 @@ pub fn _lzcnt_u32(x: u32) -> u32 {
#[target_feature(enable = "popcnt")]
#[cfg_attr(test, assert_instr(popcnt))]
#[stable(feature = "simd_x86", since = "1.27.0")]
-pub fn _popcnt32(x: i32) -> i32 {
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
+pub const fn _popcnt32(x: i32) -> i32 {
x.count_ones() as i32
}
#[cfg(test)]
mod tests {
+ use crate::core_arch::assert_eq_const as assert_eq;
use stdarch_test::simd_test;
use crate::core_arch::x86::*;
#[simd_test(enable = "lzcnt")]
- unsafe fn test_lzcnt_u32() {
+ const unsafe fn test_lzcnt_u32() {
assert_eq!(_lzcnt_u32(0b0101_1010), 25);
}
#[simd_test(enable = "popcnt")]
- unsafe fn test_popcnt32() {
+ const unsafe fn test_popcnt32() {
assert_eq!(_popcnt32(0b0101_1010), 4);
}
}
diff --git a/crates/core_arch/src/x86/avx.rs b/crates/core_arch/src/x86/avx.rs
index c50c83fcaa..acba0a3db7 100644
--- a/crates/core_arch/src/x86/avx.rs
+++ b/crates/core_arch/src/x86/avx.rs
@@ -30,7 +30,8 @@ use stdarch_test::assert_instr;
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vaddpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
-pub fn _mm256_add_pd(a: __m256d, b: __m256d) -> __m256d {
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
+pub const fn _mm256_add_pd(a: __m256d, b: __m256d) -> __m256d {
unsafe { simd_add(a, b) }
}
@@ -42,7 +43,8 @@ pub fn _mm256_add_pd(a: __m256d, b: __m256d) -> __m256d {
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vaddps))]
#[stable(feature = "simd_x86", since =
"1.27.0")] -pub fn _mm256_add_ps(a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_add_ps(a: __m256, b: __m256) -> __m256 { unsafe { simd_add(a, b) } } @@ -55,7 +57,8 @@ pub fn _mm256_add_ps(a: __m256, b: __m256) -> __m256 { // See https://github.com/rust-lang/stdarch/issues/71 #[cfg_attr(test, assert_instr(vandp))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_and_pd(a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_and_pd(a: __m256d, b: __m256d) -> __m256d { unsafe { let a: u64x4 = transmute(a); let b: u64x4 = transmute(b); @@ -71,7 +74,8 @@ pub fn _mm256_and_pd(a: __m256d, b: __m256d) -> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vandps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_and_ps(a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_and_ps(a: __m256, b: __m256) -> __m256 { unsafe { let a: u32x8 = transmute(a); let b: u32x8 = transmute(b); @@ -88,7 +92,8 @@ pub fn _mm256_and_ps(a: __m256, b: __m256) -> __m256 { // See . #[cfg_attr(test, assert_instr(vorp))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_or_pd(a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_or_pd(a: __m256d, b: __m256d) -> __m256d { unsafe { let a: u64x4 = transmute(a); let b: u64x4 = transmute(b); @@ -104,7 +109,8 @@ pub fn _mm256_or_pd(a: __m256d, b: __m256d) -> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vorps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_or_ps(a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_or_ps(a: __m256, b: __m256) -> __m256 { unsafe { let a: u32x8 = transmute(a); let b: u32x8 = transmute(b); @@ -121,7 +127,8 @@ pub fn _mm256_or_ps(a: __m256, b: __m256) -> __m256 { #[cfg_attr(test, assert_instr(vshufpd, MASK = 3))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_shuffle_pd(a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shuffle_pd(a: __m256d, b: __m256d) -> __m256d { static_assert_uimm_bits!(MASK, 8); unsafe { simd_shuffle!( @@ -146,7 +153,8 @@ pub fn _mm256_shuffle_pd(a: __m256d, b: __m256d) -> __m256d { #[cfg_attr(test, assert_instr(vshufps, MASK = 3))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_shuffle_ps(a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shuffle_ps(a: __m256, b: __m256) -> __m256 { static_assert_uimm_bits!(MASK, 8); unsafe { simd_shuffle!( @@ -174,7 +182,8 @@ pub fn _mm256_shuffle_ps(a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vandnp))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_andnot_pd(a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_andnot_pd(a: __m256d, b: __m256d) -> __m256d { unsafe { let a: u64x4 = transmute(a); let b: u64x4 = transmute(b); @@ -191,7 +200,8 @@ pub fn _mm256_andnot_pd(a: __m256d, b: __m256d) 
-> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vandnps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_andnot_ps(a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_andnot_ps(a: __m256, b: __m256) -> __m256 { unsafe { let a: u32x8 = transmute(a); let b: u32x8 = transmute(b); @@ -255,7 +265,8 @@ pub fn _mm256_min_ps(a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmulpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_mul_pd(a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mul_pd(a: __m256d, b: __m256d) -> __m256d { unsafe { simd_mul(a, b) } } @@ -267,7 +278,8 @@ pub fn _mm256_mul_pd(a: __m256d, b: __m256d) -> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmulps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_mul_ps(a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mul_ps(a: __m256, b: __m256) -> __m256 { unsafe { simd_mul(a, b) } } @@ -279,7 +291,8 @@ pub fn _mm256_mul_ps(a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vaddsubpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_addsub_pd(a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_addsub_pd(a: __m256d, b: __m256d) -> __m256d { unsafe { let a = a.as_f64x4(); let b = b.as_f64x4(); @@ -297,7 +310,8 @@ pub fn _mm256_addsub_pd(a: __m256d, b: __m256d) -> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vaddsubps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_addsub_ps(a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_addsub_ps(a: __m256, b: __m256) -> __m256 { unsafe { let a = a.as_f32x8(); let b = b.as_f32x8(); @@ -315,7 +329,8 @@ pub fn _mm256_addsub_ps(a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vsubpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_sub_pd(a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_sub_pd(a: __m256d, b: __m256d) -> __m256d { unsafe { simd_sub(a, b) } } @@ -327,7 +342,8 @@ pub fn _mm256_sub_pd(a: __m256d, b: __m256d) -> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vsubps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_sub_ps(a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_sub_ps(a: __m256, b: __m256) -> __m256 { unsafe { simd_sub(a, b) } } @@ -339,7 +355,8 @@ pub fn _mm256_sub_ps(a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vdivps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_div_ps(a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_div_ps(a: __m256, b: __m256) -> __m256 { unsafe { simd_div(a, b) } } @@ -351,7 +368,8 @@ pub fn _mm256_div_ps(a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx")] 
#[cfg_attr(test, assert_instr(vdivpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_div_pd(a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_div_pd(a: __m256d, b: __m256d) -> __m256d { unsafe { simd_div(a, b) } } @@ -386,7 +404,8 @@ pub fn _mm256_round_pd(a: __m256d) -> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vroundpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_ceil_pd(a: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_ceil_pd(a: __m256d) -> __m256d { unsafe { simd_ceil(a) } } @@ -398,7 +417,8 @@ pub fn _mm256_ceil_pd(a: __m256d) -> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vroundpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_floor_pd(a: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_floor_pd(a: __m256d) -> __m256d { unsafe { simd_floor(a) } } @@ -433,7 +453,8 @@ pub fn _mm256_round_ps(a: __m256) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vroundps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_ceil_ps(a: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_ceil_ps(a: __m256) -> __m256 { unsafe { simd_ceil(a) } } @@ -445,7 +466,8 @@ pub fn _mm256_ceil_ps(a: __m256) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vroundps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_floor_ps(a: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_floor_ps(a: __m256) -> __m256 { unsafe { simd_floor(a) } } @@ -485,7 +507,8 @@ pub fn _mm256_sqrt_pd(a: __m256d) -> __m256d { #[cfg_attr(test, assert_instr(vblendps, IMM4 = 9))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_blend_pd(a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_blend_pd(a: __m256d, b: __m256d) -> __m256d { static_assert_uimm_bits!(IMM4, 4); unsafe { simd_shuffle!( @@ -510,7 +533,8 @@ pub fn _mm256_blend_pd(a: __m256d, b: __m256d) -> __m256d { #[cfg_attr(test, assert_instr(vblendps, IMM8 = 9))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_blend_ps(a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_blend_ps(a: __m256, b: __m256) -> __m256 { static_assert_uimm_bits!(IMM8, 8); unsafe { simd_shuffle!( @@ -538,7 +562,8 @@ pub fn _mm256_blend_ps(a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vblendvpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_blendv_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_blendv_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { unsafe { let mask: i64x4 = simd_lt(transmute::<_, i64x4>(c), i64x4::ZERO); transmute(simd_select(mask, b.as_f64x4(), a.as_f64x4())) @@ -553,7 +578,8 @@ pub fn _mm256_blendv_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, 
assert_instr(vblendvps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_blendv_ps(a: __m256, b: __m256, c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_blendv_ps(a: __m256, b: __m256, c: __m256) -> __m256 { unsafe { let mask: i32x8 = simd_lt(transmute::<_, i32x8>(c), i32x8::ZERO); transmute(simd_select(mask, b.as_f32x8(), a.as_f32x8())) @@ -586,7 +612,8 @@ pub fn _mm256_dp_ps(a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vhaddpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_hadd_pd(a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_hadd_pd(a: __m256d, b: __m256d) -> __m256d { unsafe { let even = simd_shuffle!(a, b, [0, 4, 2, 6]); let odd = simd_shuffle!(a, b, [1, 5, 3, 7]); @@ -605,7 +632,8 @@ pub fn _mm256_hadd_pd(a: __m256d, b: __m256d) -> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vhaddps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_hadd_ps(a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_hadd_ps(a: __m256, b: __m256) -> __m256 { unsafe { let even = simd_shuffle!(a, b, [0, 2, 8, 10, 4, 6, 12, 14]); let odd = simd_shuffle!(a, b, [1, 3, 9, 11, 5, 7, 13, 15]); @@ -623,7 +651,8 @@ pub fn _mm256_hadd_ps(a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vhsubpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_hsub_pd(a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_hsub_pd(a: __m256d, b: __m256d) -> __m256d { unsafe { let even = simd_shuffle!(a, b, [0, 4, 2, 6]); let odd = simd_shuffle!(a, b, [1, 5, 3, 7]); @@ -642,7 +671,8 @@ pub fn _mm256_hsub_pd(a: __m256d, b: __m256d) -> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vhsubps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_hsub_ps(a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_hsub_ps(a: __m256, b: __m256) -> __m256 { unsafe { let even = simd_shuffle!(a, b, [0, 2, 8, 10, 4, 6, 12, 14]); let odd = simd_shuffle!(a, b, [1, 3, 9, 11, 5, 7, 13, 15]); @@ -658,7 +688,8 @@ pub fn _mm256_hsub_ps(a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vxorp))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_xor_pd(a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_xor_pd(a: __m256d, b: __m256d) -> __m256d { unsafe { let a: u64x4 = transmute(a); let b: u64x4 = transmute(b); @@ -674,7 +705,8 @@ pub fn _mm256_xor_pd(a: __m256d, b: __m256d) -> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vxorps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_xor_ps(a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_xor_ps(a: __m256, b: __m256) -> __m256 { unsafe { let a: u32x8 = transmute(a); let b: u32x8 = transmute(b); @@ -881,7 +913,8 @@ pub fn _mm_cmp_ss(a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "avx")] #[cfg_attr(test, 
assert_instr(vcvtdq2pd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cvtepi32_pd(a: __m128i) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtepi32_pd(a: __m128i) -> __m256d { unsafe { simd_cast(a.as_i32x4()) } } @@ -893,7 +926,8 @@ pub fn _mm256_cvtepi32_pd(a: __m128i) -> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vcvtdq2ps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cvtepi32_ps(a: __m256i) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtepi32_ps(a: __m256i) -> __m256 { unsafe { simd_cast(a.as_i32x8()) } } @@ -905,7 +939,8 @@ pub fn _mm256_cvtepi32_ps(a: __m256i) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vcvtpd2ps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cvtpd_ps(a: __m256d) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtpd_ps(a: __m256d) -> __m128 { unsafe { simd_cast(a) } } @@ -929,7 +964,8 @@ pub fn _mm256_cvtps_epi32(a: __m256) -> __m256i { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vcvtps2pd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cvtps_pd(a: __m128) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtps_pd(a: __m128) -> __m256d { unsafe { simd_cast(a) } } @@ -940,7 +976,8 @@ pub fn _mm256_cvtps_pd(a: __m128) -> __m256d { #[target_feature(enable = "avx")] //#[cfg_attr(test, assert_instr(movsd))] FIXME #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cvtsd_f64(a: __m256d) -> f64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtsd_f64(a: __m256d) -> f64 { unsafe { simd_extract!(a, 0) } } @@ -989,7 +1026,8 @@ pub fn _mm256_cvttps_epi32(a: __m256) -> __m256i { #[cfg_attr(test, assert_instr(vextractf128, IMM1 = 1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_extractf128_ps(a: __m256) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_extractf128_ps(a: __m256) -> __m128 { static_assert_uimm_bits!(IMM1, 1); unsafe { simd_shuffle!( @@ -1009,7 +1047,8 @@ pub fn _mm256_extractf128_ps(a: __m256) -> __m128 { #[cfg_attr(test, assert_instr(vextractf128, IMM1 = 1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_extractf128_pd(a: __m256d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_extractf128_pd(a: __m256d) -> __m128d { static_assert_uimm_bits!(IMM1, 1); unsafe { simd_shuffle!(a, _mm256_undefined_pd(), [[0, 1], [2, 3]][IMM1 as usize]) } } @@ -1022,7 +1061,8 @@ pub fn _mm256_extractf128_pd(a: __m256d) -> __m128d { #[cfg_attr(test, assert_instr(vextractf128, IMM1 = 1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_extractf128_si256(a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_extractf128_si256(a: __m256i) -> __m128i { static_assert_uimm_bits!(IMM1, 1); unsafe { let dst: i64x2 = simd_shuffle!(a.as_i64x4(), i64x4::ZERO, [[0, 1], [2, 3]][IMM1 as usize],); @@ -1038,7 +1078,8 @@ pub fn _mm256_extractf128_si256(a: __m256i) -> __m128i { // This intrinsic has no 
corresponding instruction. #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_extract_epi32(a: __m256i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_extract_epi32(a: __m256i) -> i32 { static_assert_uimm_bits!(INDEX, 3); unsafe { simd_extract!(a.as_i32x8(), INDEX as u32) } } @@ -1049,7 +1090,8 @@ pub fn _mm256_extract_epi32(a: __m256i) -> i32 { #[inline] #[target_feature(enable = "avx")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cvtsi256_si32(a: __m256i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtsi256_si32(a: __m256i) -> i32 { unsafe { simd_extract!(a.as_i32x8(), 0) } } @@ -1109,7 +1151,8 @@ pub fn _mm_permutevar_ps(a: __m128, b: __m128i) -> __m128 { #[cfg_attr(test, assert_instr(vshufps, IMM8 = 9))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_permute_ps(a: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_permute_ps(a: __m256) -> __m256 { static_assert_uimm_bits!(IMM8, 8); unsafe { simd_shuffle!( @@ -1138,7 +1181,8 @@ pub fn _mm256_permute_ps(a: __m256) -> __m256 { #[cfg_attr(test, assert_instr(vshufps, IMM8 = 9))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_permute_ps(a: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_permute_ps(a: __m128) -> __m128 { static_assert_uimm_bits!(IMM8, 8); unsafe { simd_shuffle!( @@ -1187,7 +1231,8 @@ pub fn _mm_permutevar_pd(a: __m128d, b: __m128i) -> __m128d { #[cfg_attr(test, assert_instr(vshufpd, IMM4 = 0x1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_permute_pd(a: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_permute_pd(a: __m256d) -> __m256d { static_assert_uimm_bits!(IMM4, 4); unsafe { simd_shuffle!( @@ -1212,7 +1257,8 @@ pub fn _mm256_permute_pd(a: __m256d) -> __m256d { #[cfg_attr(test, assert_instr(vshufpd, IMM2 = 0x1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_permute_pd(a: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_permute_pd(a: __m128d) -> __m128d { static_assert_uimm_bits!(IMM2, 2); unsafe { simd_shuffle!( @@ -1232,7 +1278,8 @@ pub fn _mm_permute_pd(a: __m128d) -> __m128d { #[cfg_attr(test, assert_instr(vperm2f128, IMM8 = 0x5))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_permute2f128_ps(a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_permute2f128_ps(a: __m256, b: __m256) -> __m256 { static_assert_uimm_bits!(IMM8, 8); _mm256_castsi256_ps(_mm256_permute2f128_si256::( _mm256_castps_si256(a), @@ -1249,7 +1296,8 @@ pub fn _mm256_permute2f128_ps(a: __m256, b: __m256) -> __m256 { #[cfg_attr(test, assert_instr(vperm2f128, IMM8 = 0x31))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_permute2f128_pd(a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_permute2f128_pd(a: __m256d, b: __m256d) -> __m256d { 
static_assert_uimm_bits!(IMM8, 8); _mm256_castsi256_pd(_mm256_permute2f128_si256::( _mm256_castpd_si256(a), @@ -1266,7 +1314,8 @@ pub fn _mm256_permute2f128_pd(a: __m256d, b: __m256d) -> __m256 #[cfg_attr(test, assert_instr(vperm2f128, IMM8 = 0x31))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_permute2f128_si256(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_permute2f128_si256(a: __m256i, b: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); const fn idx(imm8: i32, pos: u32) -> u32 { let part = if pos < 2 { @@ -1308,7 +1357,8 @@ pub fn _mm256_permute2f128_si256(a: __m256i, b: __m256i) -> __m #[cfg_attr(test, assert_instr(vbroadcastss))] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::trivially_copy_pass_by_ref)] -pub fn _mm256_broadcast_ss(f: &f32) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_broadcast_ss(f: &f32) -> __m256 { _mm256_set1_ps(*f) } @@ -1321,7 +1371,8 @@ pub fn _mm256_broadcast_ss(f: &f32) -> __m256 { #[cfg_attr(test, assert_instr(vbroadcastss))] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::trivially_copy_pass_by_ref)] -pub fn _mm_broadcast_ss(f: &f32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_broadcast_ss(f: &f32) -> __m128 { _mm_set1_ps(*f) } @@ -1334,7 +1385,8 @@ pub fn _mm_broadcast_ss(f: &f32) -> __m128 { #[cfg_attr(test, assert_instr(vbroadcastsd))] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::trivially_copy_pass_by_ref)] -pub fn _mm256_broadcast_sd(f: &f64) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_broadcast_sd(f: &f64) -> __m256d { _mm256_set1_pd(*f) } @@ -1346,7 +1398,8 @@ pub fn _mm256_broadcast_sd(f: &f64) -> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vbroadcastf128))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_broadcast_ps(a: &__m128) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_broadcast_ps(a: &__m128) -> __m256 { unsafe { simd_shuffle!(*a, _mm_setzero_ps(), [0, 1, 2, 3, 0, 1, 2, 3]) } } @@ -1358,7 +1411,8 @@ pub fn _mm256_broadcast_ps(a: &__m128) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vbroadcastf128))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_broadcast_pd(a: &__m128d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_broadcast_pd(a: &__m128d) -> __m256d { unsafe { simd_shuffle!(*a, _mm_setzero_pd(), [0, 1, 0, 1]) } } @@ -1372,7 +1426,8 @@ pub fn _mm256_broadcast_pd(a: &__m128d) -> __m256d { #[cfg_attr(test, assert_instr(vinsertf128, IMM1 = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_insertf128_ps(a: __m256, b: __m128) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_insertf128_ps(a: __m256, b: __m128) -> __m256 { static_assert_uimm_bits!(IMM1, 1); unsafe { simd_shuffle!( @@ -1393,7 +1448,8 @@ pub fn _mm256_insertf128_ps(a: __m256, b: __m128) -> __m256 { #[cfg_attr(test, assert_instr(vinsertf128, IMM1 = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_insertf128_pd(a: 
__m256d, b: __m128d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_insertf128_pd(a: __m256d, b: __m128d) -> __m256d { static_assert_uimm_bits!(IMM1, 1); unsafe { simd_shuffle!( @@ -1413,7 +1469,8 @@ pub fn _mm256_insertf128_pd(a: __m256d, b: __m128d) -> __m256d #[cfg_attr(test, assert_instr(vinsertf128, IMM1 = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_insertf128_si256(a: __m256i, b: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_insertf128_si256(a: __m256i, b: __m128i) -> __m256i { static_assert_uimm_bits!(IMM1, 1); unsafe { let dst: i64x4 = simd_shuffle!( @@ -1434,7 +1491,8 @@ pub fn _mm256_insertf128_si256(a: __m256i, b: __m128i) -> __m25 // This intrinsic has no corresponding instruction. #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_insert_epi8(a: __m256i, i: i8) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_insert_epi8(a: __m256i, i: i8) -> __m256i { static_assert_uimm_bits!(INDEX, 5); unsafe { transmute(simd_insert!(a.as_i8x32(), INDEX as u32, i)) } } @@ -1448,7 +1506,8 @@ pub fn _mm256_insert_epi8(a: __m256i, i: i8) -> __m256i { // This intrinsic has no corresponding instruction. #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_insert_epi16(a: __m256i, i: i16) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_insert_epi16(a: __m256i, i: i16) -> __m256i { static_assert_uimm_bits!(INDEX, 4); unsafe { transmute(simd_insert!(a.as_i16x16(), INDEX as u32, i)) } } @@ -1462,7 +1521,8 @@ pub fn _mm256_insert_epi16(a: __m256i, i: i16) -> __m256i { // This intrinsic has no corresponding instruction. 
#[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_insert_epi32(a: __m256i, i: i32) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_insert_epi32(a: __m256i, i: i32) -> __m256i { static_assert_uimm_bits!(INDEX, 3); unsafe { transmute(simd_insert!(a.as_i32x8(), INDEX as u32, i)) } } @@ -1481,7 +1541,8 @@ pub fn _mm256_insert_epi32(a: __m256i, i: i32) -> __m256i { )] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] -pub unsafe fn _mm256_load_pd(mem_addr: *const f64) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_load_pd(mem_addr: *const f64) -> __m256d { *(mem_addr as *const __m256d) } @@ -1499,7 +1560,8 @@ pub unsafe fn _mm256_load_pd(mem_addr: *const f64) -> __m256d { )] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] -pub unsafe fn _mm256_store_pd(mem_addr: *mut f64, a: __m256d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_store_pd(mem_addr: *mut f64, a: __m256d) { *(mem_addr as *mut __m256d) = a; } @@ -1517,7 +1579,8 @@ pub unsafe fn _mm256_store_pd(mem_addr: *mut f64, a: __m256d) { )] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] -pub unsafe fn _mm256_load_ps(mem_addr: *const f32) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_load_ps(mem_addr: *const f32) -> __m256 { *(mem_addr as *const __m256) } @@ -1535,7 +1598,8 @@ pub unsafe fn _mm256_load_ps(mem_addr: *const f32) -> __m256 { )] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] -pub unsafe fn _mm256_store_ps(mem_addr: *mut f32, a: __m256) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_store_ps(mem_addr: *mut f32, a: __m256) { *(mem_addr as *mut __m256) = a; } @@ -1548,7 +1612,8 @@ pub unsafe fn _mm256_store_ps(mem_addr: *mut f32, a: __m256) { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovup))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_loadu_pd(mem_addr: *const f64) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_loadu_pd(mem_addr: *const f64) -> __m256d { let mut dst = _mm256_undefined_pd(); ptr::copy_nonoverlapping( mem_addr as *const u8, @@ -1567,7 +1632,8 @@ pub unsafe fn _mm256_loadu_pd(mem_addr: *const f64) -> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovup))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_storeu_pd(mem_addr: *mut f64, a: __m256d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_storeu_pd(mem_addr: *mut f64, a: __m256d) { mem_addr.cast::<__m256d>().write_unaligned(a); } @@ -1580,7 +1646,8 @@ pub unsafe fn _mm256_storeu_pd(mem_addr: *mut f64, a: __m256d) { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovups))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_loadu_ps(mem_addr: *const f32) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_loadu_ps(mem_addr: *const f32) -> __m256 { let mut dst = _mm256_undefined_ps(); ptr::copy_nonoverlapping( mem_addr as *const u8, @@ -1599,7 
+1666,8 @@ pub unsafe fn _mm256_loadu_ps(mem_addr: *const f32) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovups))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_storeu_ps(mem_addr: *mut f32, a: __m256) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_storeu_ps(mem_addr: *mut f32, a: __m256) { mem_addr.cast::<__m256>().write_unaligned(a); } @@ -1615,7 +1683,8 @@ pub unsafe fn _mm256_storeu_ps(mem_addr: *mut f32, a: __m256) { assert_instr(vmovaps) )] // FIXME vmovdqa expected #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_load_si256(mem_addr: *const __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_load_si256(mem_addr: *const __m256i) -> __m256i { *mem_addr } @@ -1631,7 +1700,8 @@ pub unsafe fn _mm256_load_si256(mem_addr: *const __m256i) -> __m256i { assert_instr(vmovaps) )] // FIXME vmovdqa expected #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_store_si256(mem_addr: *mut __m256i, a: __m256i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_store_si256(mem_addr: *mut __m256i, a: __m256i) { *mem_addr = a; } @@ -1643,7 +1713,8 @@ pub unsafe fn _mm256_store_si256(mem_addr: *mut __m256i, a: __m256i) { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovups))] // FIXME vmovdqu expected #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_loadu_si256(mem_addr: *const __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_loadu_si256(mem_addr: *const __m256i) -> __m256i { let mut dst = _mm256_undefined_si256(); ptr::copy_nonoverlapping( mem_addr as *const u8, @@ -1661,7 +1732,8 @@ pub unsafe fn _mm256_loadu_si256(mem_addr: *const __m256i) -> __m256i { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovups))] // FIXME vmovdqu expected #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_storeu_si256(mem_addr: *mut __m256i, a: __m256i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_storeu_si256(mem_addr: *mut __m256i, a: __m256i) { mem_addr.write_unaligned(a); } @@ -1674,7 +1746,8 @@ pub unsafe fn _mm256_storeu_si256(mem_addr: *mut __m256i, a: __m256i) { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmaskmovpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_maskload_pd(mem_addr: *const f64, mask: __m256i) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_maskload_pd(mem_addr: *const f64, mask: __m256i) -> __m256d { let mask = simd_shr(mask.as_i64x4(), i64x4::splat(63)); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, _mm256_setzero_pd()) } @@ -1687,7 +1760,8 @@ pub unsafe fn _mm256_maskload_pd(mem_addr: *const f64, mask: __m256i) -> __m256d #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmaskmovpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_maskstore_pd(mem_addr: *mut f64, mask: __m256i, a: __m256d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_maskstore_pd(mem_addr: *mut f64, mask: __m256i, a: __m256d) { let mask = simd_shr(mask.as_i64x4(), i64x4::splat(63)); 
simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a) } @@ -1701,7 +1775,8 @@ pub unsafe fn _mm256_maskstore_pd(mem_addr: *mut f64, mask: __m256i, a: __m256d) #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmaskmovpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_maskload_pd(mem_addr: *const f64, mask: __m128i) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_maskload_pd(mem_addr: *const f64, mask: __m128i) -> __m128d { let mask = simd_shr(mask.as_i64x2(), i64x2::splat(63)); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, _mm_setzero_pd()) } @@ -1714,7 +1789,8 @@ pub unsafe fn _mm_maskload_pd(mem_addr: *const f64, mask: __m128i) -> __m128d { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmaskmovpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_maskstore_pd(mem_addr: *mut f64, mask: __m128i, a: __m128d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_maskstore_pd(mem_addr: *mut f64, mask: __m128i, a: __m128d) { let mask = simd_shr(mask.as_i64x2(), i64x2::splat(63)); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a) } @@ -1728,7 +1804,8 @@ pub unsafe fn _mm_maskstore_pd(mem_addr: *mut f64, mask: __m128i, a: __m128d) { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmaskmovps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_maskload_ps(mem_addr: *const f32, mask: __m256i) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_maskload_ps(mem_addr: *const f32, mask: __m256i) -> __m256 { let mask = simd_shr(mask.as_i32x8(), i32x8::splat(31)); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, _mm256_setzero_ps()) } @@ -1741,7 +1818,8 @@ pub unsafe fn _mm256_maskload_ps(mem_addr: *const f32, mask: __m256i) -> __m256 #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmaskmovps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_maskstore_ps(mem_addr: *mut f32, mask: __m256i, a: __m256) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_maskstore_ps(mem_addr: *mut f32, mask: __m256i, a: __m256) { let mask = simd_shr(mask.as_i32x8(), i32x8::splat(31)); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a) } @@ -1755,7 +1833,8 @@ pub unsafe fn _mm256_maskstore_ps(mem_addr: *mut f32, mask: __m256i, a: __m256) #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmaskmovps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_maskload_ps(mem_addr: *const f32, mask: __m128i) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_maskload_ps(mem_addr: *const f32, mask: __m128i) -> __m128 { let mask = simd_shr(mask.as_i32x4(), i32x4::splat(31)); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, _mm_setzero_ps()) } @@ -1768,7 +1847,8 @@ pub unsafe fn _mm_maskload_ps(mem_addr: *const f32, mask: __m128i) -> __m128 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmaskmovps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_maskstore_ps(mem_addr: *mut f32, mask: __m128i, a: __m128) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_maskstore_ps(mem_addr: *mut f32, mask: __m128i, a: __m128) { let mask = 
simd_shr(mask.as_i32x4(), i32x4::splat(31)); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a) } @@ -1781,7 +1861,8 @@ pub unsafe fn _mm_maskstore_ps(mem_addr: *mut f32, mask: __m128i, a: __m128) { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovshdup))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_movehdup_ps(a: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_movehdup_ps(a: __m256) -> __m256 { unsafe { simd_shuffle!(a, a, [1, 1, 3, 3, 5, 5, 7, 7]) } } @@ -1793,7 +1874,8 @@ pub fn _mm256_movehdup_ps(a: __m256) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovsldup))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_moveldup_ps(a: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_moveldup_ps(a: __m256) -> __m256 { unsafe { simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6]) } } @@ -1805,7 +1887,8 @@ pub fn _mm256_moveldup_ps(a: __m256) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovddup))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_movedup_pd(a: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_movedup_pd(a: __m256d) -> __m256d { unsafe { simd_shuffle!(a, a, [0, 0, 2, 2]) } } @@ -1943,7 +2026,8 @@ pub fn _mm256_rsqrt_ps(a: __m256) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vunpckhpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_unpackhi_pd(a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_unpackhi_pd(a: __m256d, b: __m256d) -> __m256d { unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) } } @@ -1955,7 +2039,8 @@ pub fn _mm256_unpackhi_pd(a: __m256d, b: __m256d) -> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vunpckhps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_unpackhi_ps(a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_unpackhi_ps(a: __m256, b: __m256) -> __m256 { unsafe { simd_shuffle!(a, b, [2, 10, 3, 11, 6, 14, 7, 15]) } } @@ -1967,7 +2052,8 @@ pub fn _mm256_unpackhi_ps(a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vunpcklpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_unpacklo_pd(a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_unpacklo_pd(a: __m256d, b: __m256d) -> __m256d { unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) } } @@ -1979,7 +2065,8 @@ pub fn _mm256_unpacklo_pd(a: __m256d, b: __m256d) -> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vunpcklps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_unpacklo_ps(a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_unpacklo_ps(a: __m256, b: __m256) -> __m256 { unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 4, 12, 5, 13]) } } @@ -1993,7 +2080,8 @@ pub fn _mm256_unpacklo_ps(a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vptest))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_testz_si256(a: __m256i, b: 
__m256i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_testz_si256(a: __m256i, b: __m256i) -> i32 { unsafe { let r = simd_and(a.as_i64x4(), b.as_i64x4()); (0i64 == simd_reduce_or(r)) as i32 @@ -2010,7 +2098,8 @@ pub fn _mm256_testz_si256(a: __m256i, b: __m256i) -> i32 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vptest))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_testc_si256(a: __m256i, b: __m256i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_testc_si256(a: __m256i, b: __m256i) -> i32 { unsafe { let r = simd_and(simd_xor(a.as_i64x4(), i64x4::splat(!0)), b.as_i64x4()); (0i64 == simd_reduce_or(r)) as i32 @@ -2097,7 +2186,8 @@ pub fn _mm256_testnzc_pd(a: __m256d, b: __m256d) -> i32 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vtestpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_testz_pd(a: __m128d, b: __m128d) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_testz_pd(a: __m128d, b: __m128d) -> i32 { unsafe { let r: i64x2 = simd_lt(transmute(_mm_and_pd(a, b)), i64x2::ZERO); (0i64 == simd_reduce_or(r)) as i32 @@ -2117,7 +2207,8 @@ pub fn _mm_testz_pd(a: __m128d, b: __m128d) -> i32 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vtestpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_testc_pd(a: __m128d, b: __m128d) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_testc_pd(a: __m128d, b: __m128d) -> i32 { unsafe { let r: i64x2 = simd_lt(transmute(_mm_andnot_pd(a, b)), i64x2::ZERO); (0i64 == simd_reduce_or(r)) as i32 @@ -2207,7 +2298,8 @@ pub fn _mm256_testnzc_ps(a: __m256, b: __m256) -> i32 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vtestps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_testz_ps(a: __m128, b: __m128) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_testz_ps(a: __m128, b: __m128) -> i32 { unsafe { let r: i32x4 = simd_lt(transmute(_mm_and_ps(a, b)), i32x4::ZERO); (0i32 == simd_reduce_or(r)) as i32 @@ -2227,7 +2319,8 @@ pub fn _mm_testz_ps(a: __m128, b: __m128) -> i32 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vtestps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_testc_ps(a: __m128, b: __m128) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_testc_ps(a: __m128, b: __m128) -> i32 { unsafe { let r: i32x4 = simd_lt(transmute(_mm_andnot_ps(a, b)), i32x4::ZERO); (0i32 == simd_reduce_or(r)) as i32 @@ -2261,12 +2354,13 @@ pub fn _mm_testnzc_ps(a: __m128, b: __m128) -> i32 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovmskpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_movemask_pd(a: __m256d) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_movemask_pd(a: __m256d) -> i32 { // Propagate the highest bit to the rest, because simd_bitmask // requires all-1 or all-0. 
unsafe {
let mask: i64x4 = simd_lt(transmute(a), i64x4::ZERO);
- simd_bitmask::<i64x4, u8>(mask).into()
+ simd_bitmask::<i64x4, u8>(mask) as i32
}
}
@@ -2279,12 +2373,13 @@ pub fn _mm256_movemask_pd(a: __m256d) -> i32 {
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovmskps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
-pub fn _mm256_movemask_ps(a: __m256) -> i32 {
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
+pub const fn _mm256_movemask_ps(a: __m256) -> i32 {
// Propagate the highest bit to the rest, because simd_bitmask
// requires all-1 or all-0.
unsafe {
let mask: i32x8 = simd_lt(transmute(a), i32x8::ZERO);
- simd_bitmask::<i32x8, u8>(mask).into()
+ simd_bitmask::<i32x8, u8>(mask) as i32
}
}
@@ -2295,7 +2390,8 @@ pub fn _mm256_movemask_ps(a: __m256) -> i32 {
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vxorp))]
#[stable(feature = "simd_x86", since = "1.27.0")]
-pub fn _mm256_setzero_pd() -> __m256d {
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
+pub const fn _mm256_setzero_pd() -> __m256d {
const { unsafe { mem::zeroed() } }
}
@@ -2306,7 +2402,8 @@ pub fn _mm256_setzero_pd() -> __m256d {
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vxorps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
-pub fn _mm256_setzero_ps() -> __m256 {
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
+pub const fn _mm256_setzero_ps() -> __m256 {
const { unsafe { mem::zeroed() } }
}
@@ -2317,7 +2414,8 @@ pub fn _mm256_setzero_ps() -> __m256 {
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vxor))]
#[stable(feature = "simd_x86", since = "1.27.0")]
-pub fn _mm256_setzero_si256() -> __m256i {
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
+pub const fn _mm256_setzero_si256() -> __m256i {
const { unsafe { mem::zeroed() } }
}
@@ -2330,7 +2428,8 @@ pub fn _mm256_setzero_si256() -> __m256i {
// This intrinsic has no corresponding instruction.
#[cfg_attr(test, assert_instr(vinsertf128))]
#[stable(feature = "simd_x86", since = "1.27.0")]
-pub fn _mm256_set_pd(a: f64, b: f64, c: f64, d: f64) -> __m256d {
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
+pub const fn _mm256_set_pd(a: f64, b: f64, c: f64, d: f64) -> __m256d {
_mm256_setr_pd(d, c, b, a)
}
@@ -2342,7 +2441,17 @@ pub fn _mm256_set_pd(a: f64, b: f64, c: f64, d: f64) -> __m256d {
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
#[stable(feature = "simd_x86", since = "1.27.0")]
-pub fn _mm256_set_ps(a: f32, b: f32, c: f32, d: f32, e: f32, f: f32, g: f32, h: f32) -> __m256 {
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
+pub const fn _mm256_set_ps(
+ a: f32,
+ b: f32,
+ c: f32,
+ d: f32,
+ e: f32,
+ f: f32,
+ g: f32,
+ h: f32,
+) -> __m256 {
_mm256_setr_ps(h, g, f, e, d, c, b, a)
}
@@ -2353,7 +2462,8 @@ pub fn _mm256_set_ps(a: f32, b: f32, c: f32, d: f32, e: f32, f: f32, g: f32, h:
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
#[stable(feature = "simd_x86", since = "1.27.0")]
-pub fn _mm256_set_epi8(
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")]
+pub const fn _mm256_set_epi8(
e00: i8,
e01: i8,
e02: i8,
@@ -2403,7 +2513,8 @@ pub fn _mm256_set_epi8(
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
#[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_set_epi16( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_set_epi16( e00: i16, e01: i16, e02: i16, @@ -2437,7 +2548,8 @@ pub fn _mm256_set_epi16( #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_set_epi32( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_set_epi32( e0: i32, e1: i32, e2: i32, @@ -2457,7 +2569,8 @@ pub fn _mm256_set_epi32( #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_set_epi64x(a: i64, b: i64, c: i64, d: i64) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_set_epi64x(a: i64, b: i64, c: i64, d: i64) -> __m256i { _mm256_setr_epi64x(d, c, b, a) } @@ -2469,7 +2582,8 @@ pub fn _mm256_set_epi64x(a: i64, b: i64, c: i64, d: i64) -> __m256i { #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_setr_pd(a: f64, b: f64, c: f64, d: f64) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_setr_pd(a: f64, b: f64, c: f64, d: f64) -> __m256d { __m256d([a, b, c, d]) } @@ -2481,7 +2595,17 @@ pub fn _mm256_setr_pd(a: f64, b: f64, c: f64, d: f64) -> __m256d { #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_setr_ps(a: f32, b: f32, c: f32, d: f32, e: f32, f: f32, g: f32, h: f32) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_setr_ps( + a: f32, + b: f32, + c: f32, + d: f32, + e: f32, + f: f32, + g: f32, + h: f32, +) -> __m256 { __m256([a, b, c, d, e, f, g, h]) } @@ -2493,7 +2617,8 @@ pub fn _mm256_setr_ps(a: f32, b: f32, c: f32, d: f32, e: f32, f: f32, g: f32, h: #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_setr_epi8( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_setr_epi8( e00: i8, e01: i8, e02: i8, @@ -2546,7 +2671,8 @@ pub fn _mm256_setr_epi8( #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_setr_epi16( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_setr_epi16( e00: i16, e01: i16, e02: i16, @@ -2583,7 +2709,8 @@ pub fn _mm256_setr_epi16( #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_setr_epi32( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_setr_epi32( e0: i32, e1: i32, e2: i32, @@ -2604,7 +2731,8 @@ pub fn _mm256_setr_epi32( #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. 
#[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_setr_epi64x(a: i64, b: i64, c: i64, d: i64) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_setr_epi64x(a: i64, b: i64, c: i64, d: i64) -> __m256i { unsafe { transmute(i64x4::new(a, b, c, d)) } } @@ -2616,7 +2744,8 @@ pub fn _mm256_setr_epi64x(a: i64, b: i64, c: i64, d: i64) -> __m256i { #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_set1_pd(a: f64) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_set1_pd(a: f64) -> __m256d { _mm256_setr_pd(a, a, a, a) } @@ -2628,7 +2757,8 @@ pub fn _mm256_set1_pd(a: f64) -> __m256d { #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_set1_ps(a: f32) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_set1_ps(a: f32) -> __m256 { _mm256_setr_ps(a, a, a, a, a, a, a, a) } @@ -2640,7 +2770,8 @@ pub fn _mm256_set1_ps(a: f32) -> __m256 { #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_set1_epi8(a: i8) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_set1_epi8(a: i8) -> __m256i { #[rustfmt::skip] _mm256_setr_epi8( a, a, a, a, a, a, a, a, @@ -2660,7 +2791,8 @@ pub fn _mm256_set1_epi8(a: i8) -> __m256i { #[cfg_attr(test, assert_instr(vinsertf128))] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_set1_epi16(a: i16) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_set1_epi16(a: i16) -> __m256i { _mm256_setr_epi16(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a) } @@ -2672,7 +2804,8 @@ pub fn _mm256_set1_epi16(a: i16) -> __m256i { #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_set1_epi32(a: i32) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_set1_epi32(a: i32) -> __m256i { _mm256_setr_epi32(a, a, a, a, a, a, a, a) } @@ -2686,7 +2819,8 @@ pub fn _mm256_set1_epi32(a: i32) -> __m256i { #[cfg_attr(all(test, target_arch = "x86"), assert_instr(vbroadcastsd))] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_set1_epi64x(a: i64) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_set1_epi64x(a: i64) -> __m256i { _mm256_setr_epi64x(a, a, a, a) } @@ -2698,7 +2832,8 @@ pub fn _mm256_set1_epi64x(a: i64) -> __m256i { // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_castpd_ps(a: __m256d) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castpd_ps(a: __m256d) -> __m256 { unsafe { transmute(a) } } @@ -2710,7 +2845,8 @@ pub fn _mm256_castpd_ps(a: __m256d) -> __m256 { // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. 
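The `_mm256_cast*` functions starting here are pure bit reinterpretations, and they can become `const` because `mem::transmute` is itself a `const unsafe fn`. A hedged sketch of the same idea on ordinary scalar types; the helper is illustrative, not from the diff:

```rust
// Sketch: a const bit-cast, analogous to the _mm256_cast* intrinsics.
// `transmute` is usable inside a const fn as long as the source and
// target types have the same size.
const fn f32_bits(x: f32) -> u32 {
    // SAFETY: f32 and u32 are both 4 bytes and every bit pattern is a valid u32.
    unsafe { core::mem::transmute(x) }
}

// Evaluated at compile time: 1.0f32 is 0x3F80_0000.
const ONE_BITS: u32 = f32_bits(1.0);
```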
#[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_castps_pd(a: __m256) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castps_pd(a: __m256) -> __m256d { unsafe { transmute(a) } } @@ -2722,7 +2858,8 @@ pub fn _mm256_castps_pd(a: __m256) -> __m256d { // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_castps_si256(a: __m256) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castps_si256(a: __m256) -> __m256i { unsafe { transmute(a) } } @@ -2734,7 +2871,8 @@ pub fn _mm256_castps_si256(a: __m256) -> __m256i { // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_castsi256_ps(a: __m256i) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castsi256_ps(a: __m256i) -> __m256 { unsafe { transmute(a) } } @@ -2746,7 +2884,8 @@ pub fn _mm256_castsi256_ps(a: __m256i) -> __m256 { // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_castpd_si256(a: __m256d) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castpd_si256(a: __m256d) -> __m256i { unsafe { transmute(a) } } @@ -2758,7 +2897,8 @@ pub fn _mm256_castpd_si256(a: __m256d) -> __m256i { // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_castsi256_pd(a: __m256i) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castsi256_pd(a: __m256i) -> __m256d { unsafe { transmute(a) } } @@ -2770,7 +2910,8 @@ pub fn _mm256_castsi256_pd(a: __m256i) -> __m256d { // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_castps256_ps128(a: __m256) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castps256_ps128(a: __m256) -> __m128 { unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } } @@ -2782,7 +2923,8 @@ pub fn _mm256_castps256_ps128(a: __m256) -> __m128 { // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_castpd256_pd128(a: __m256d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castpd256_pd128(a: __m256d) -> __m128d { unsafe { simd_shuffle!(a, a, [0, 1]) } } @@ -2794,7 +2936,8 @@ pub fn _mm256_castpd256_pd128(a: __m256d) -> __m128d { // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. 
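The 256-to-128-bit casts above simply keep the low half of the vector, expressed as `simd_shuffle!(a, a, [0, 1, 2, 3])` (or `[0, 1]` for `f64` lanes). The same lane selection on a plain array, as a compile-time sketch with a hypothetical helper:

```rust
// Sketch: keep the low four lanes of an eight-lane vector, as
// _mm256_castps256_ps128 does via simd_shuffle! with indices [0, 1, 2, 3].
const fn low_half(v: [f32; 8]) -> [f32; 4] {
    [v[0], v[1], v[2], v[3]]
}

const LOW: [f32; 4] = low_half([1., 2., 3., 4., 5., 6., 7., 8.]);
```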
#[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_castsi256_si128(a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castsi256_si128(a: __m256i) -> __m128i { unsafe { let a = a.as_i64x4(); let dst: i64x2 = simd_shuffle!(a, a, [0, 1]); @@ -2811,7 +2954,8 @@ pub fn _mm256_castsi256_si128(a: __m256i) -> __m128i { // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_castps128_ps256(a: __m128) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castps128_ps256(a: __m128) -> __m256 { unsafe { simd_shuffle!(a, _mm_undefined_ps(), [0, 1, 2, 3, 4, 4, 4, 4]) } } @@ -2824,7 +2968,8 @@ pub fn _mm256_castps128_ps256(a: __m128) -> __m256 { // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_castpd128_pd256(a: __m128d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castpd128_pd256(a: __m128d) -> __m256d { unsafe { simd_shuffle!(a, _mm_undefined_pd(), [0, 1, 2, 2]) } } @@ -2837,7 +2982,8 @@ pub fn _mm256_castpd128_pd256(a: __m128d) -> __m256d { // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_castsi128_si256(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castsi128_si256(a: __m128i) -> __m256i { unsafe { let a = a.as_i64x2(); let undefined = i64x2::ZERO; @@ -2856,7 +3002,8 @@ pub fn _mm256_castsi128_si256(a: __m128i) -> __m256i { // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_zextps128_ps256(a: __m128) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_zextps128_ps256(a: __m128) -> __m256 { unsafe { simd_shuffle!(a, _mm_setzero_ps(), [0, 1, 2, 3, 4, 5, 6, 7]) } } @@ -2870,7 +3017,8 @@ pub fn _mm256_zextps128_ps256(a: __m128) -> __m256 { // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_zextsi128_si256(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_zextsi128_si256(a: __m128i) -> __m256i { unsafe { let b = i64x2::ZERO; let dst: i64x4 = simd_shuffle!(a.as_i64x2(), b, [0, 1, 2, 3]); @@ -2889,7 +3037,8 @@ pub fn _mm256_zextsi128_si256(a: __m128i) -> __m256i { // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_zextpd128_pd256(a: __m128d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_zextpd128_pd256(a: __m128d) -> __m256d { unsafe { simd_shuffle!(a, _mm_setzero_pd(), [0, 1, 2, 3]) } } @@ -2903,7 +3052,8 @@ pub fn _mm256_zextpd128_pd256(a: __m128d) -> __m256d { #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. 
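The contrast in this stretch is between `_mm256_castps128_ps256`, whose upper 128 bits are unspecified, and `_mm256_zextps128_ps256`, which shuffles the input against a zero vector so the upper lanes are guaranteed zero. A plain-array sketch of the zero-extending form; the helper name is illustrative:

```rust
// Sketch: zero-extend four lanes to eight, mirroring how the zext
// intrinsics shuffle the input together with _mm_setzero_ps().
const fn zext_4_to_8(v: [f32; 4]) -> [f32; 8] {
    [v[0], v[1], v[2], v[3], 0.0, 0.0, 0.0, 0.0]
}

// Evaluates at compile time to [1., 2., 3., 4., 0., 0., 0., 0.].
const WIDE: [f32; 8] = zext_4_to_8([1., 2., 3., 4.]);
```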
#[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_undefined_ps() -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_undefined_ps() -> __m256 { const { unsafe { mem::zeroed() } } } @@ -2917,7 +3067,8 @@ pub fn _mm256_undefined_ps() -> __m256 { #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_undefined_pd() -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_undefined_pd() -> __m256d { const { unsafe { mem::zeroed() } } } @@ -2931,7 +3082,8 @@ pub fn _mm256_undefined_pd() -> __m256d { #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_undefined_si256() -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_undefined_si256() -> __m256i { const { unsafe { mem::zeroed() } } } @@ -2942,7 +3094,8 @@ pub fn _mm256_undefined_si256() -> __m256i { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vinsertf128))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_set_m128(hi: __m128, lo: __m128) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_set_m128(hi: __m128, lo: __m128) -> __m256 { unsafe { simd_shuffle!(lo, hi, [0, 1, 2, 3, 4, 5, 6, 7]) } } @@ -2953,7 +3106,8 @@ pub fn _mm256_set_m128(hi: __m128, lo: __m128) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vinsertf128))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_set_m128d(hi: __m128d, lo: __m128d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_set_m128d(hi: __m128d, lo: __m128d) -> __m256d { unsafe { let hi: __m128 = transmute(hi); let lo: __m128 = transmute(lo); @@ -2968,7 +3122,8 @@ pub fn _mm256_set_m128d(hi: __m128d, lo: __m128d) -> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vinsertf128))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_set_m128i(hi: __m128i, lo: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_set_m128i(hi: __m128i, lo: __m128i) -> __m256i { unsafe { let hi: __m128 = transmute(hi); let lo: __m128 = transmute(lo); @@ -2983,7 +3138,8 @@ pub fn _mm256_set_m128i(hi: __m128i, lo: __m128i) -> __m256i { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vinsertf128))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_setr_m128(lo: __m128, hi: __m128) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_setr_m128(lo: __m128, hi: __m128) -> __m256 { _mm256_set_m128(hi, lo) } @@ -2994,7 +3150,8 @@ pub fn _mm256_setr_m128(lo: __m128, hi: __m128) -> __m256 { #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vinsertf128))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_setr_m128d(lo: __m128d, hi: __m128d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_setr_m128d(lo: __m128d, hi: __m128d) -> __m256d { _mm256_set_m128d(hi, lo) } @@ -3005,7 +3162,8 @@ pub fn _mm256_setr_m128d(lo: __m128d, hi: __m128d) -> __m256d { #[target_feature(enable = "avx")] #[cfg_attr(test, 
assert_instr(vinsertf128))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_setr_m128i(lo: __m128i, hi: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_setr_m128i(lo: __m128i, hi: __m128i) -> __m256i { _mm256_set_m128i(hi, lo) } @@ -3019,7 +3177,8 @@ pub fn _mm256_setr_m128i(lo: __m128i, hi: __m128i) -> __m256i { #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_loadu2_m128(hiaddr: *const f32, loaddr: *const f32) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_loadu2_m128(hiaddr: *const f32, loaddr: *const f32) -> __m256 { let a = _mm256_castps128_ps256(_mm_loadu_ps(loaddr)); _mm256_insertf128_ps::<1>(a, _mm_loadu_ps(hiaddr)) } @@ -3034,7 +3193,8 @@ pub unsafe fn _mm256_loadu2_m128(hiaddr: *const f32, loaddr: *const f32) -> __m2 #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_loadu2_m128d(hiaddr: *const f64, loaddr: *const f64) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_loadu2_m128d(hiaddr: *const f64, loaddr: *const f64) -> __m256d { let a = _mm256_castpd128_pd256(_mm_loadu_pd(loaddr)); _mm256_insertf128_pd::<1>(a, _mm_loadu_pd(hiaddr)) } @@ -3048,7 +3208,8 @@ pub unsafe fn _mm256_loadu2_m128d(hiaddr: *const f64, loaddr: *const f64) -> __m #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_loadu2_m128i(hiaddr: *const __m128i, loaddr: *const __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_loadu2_m128i(hiaddr: *const __m128i, loaddr: *const __m128i) -> __m256i { let a = _mm256_castsi128_si256(_mm_loadu_si128(loaddr)); _mm256_insertf128_si256::<1>(a, _mm_loadu_si128(hiaddr)) } @@ -3063,7 +3224,8 @@ pub unsafe fn _mm256_loadu2_m128i(hiaddr: *const __m128i, loaddr: *const __m128i #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_storeu2_m128(hiaddr: *mut f32, loaddr: *mut f32, a: __m256) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_storeu2_m128(hiaddr: *mut f32, loaddr: *mut f32, a: __m256) { let lo = _mm256_castps256_ps128(a); _mm_storeu_ps(loaddr, lo); let hi = _mm256_extractf128_ps::<1>(a); @@ -3080,7 +3242,8 @@ pub unsafe fn _mm256_storeu2_m128(hiaddr: *mut f32, loaddr: *mut f32, a: __m256) #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_storeu2_m128d(hiaddr: *mut f64, loaddr: *mut f64, a: __m256d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_storeu2_m128d(hiaddr: *mut f64, loaddr: *mut f64, a: __m256d) { let lo = _mm256_castpd256_pd128(a); _mm_storeu_pd(loaddr, lo); let hi = _mm256_extractf128_pd::<1>(a); @@ -3096,7 +3259,8 @@ pub unsafe fn _mm256_storeu2_m128d(hiaddr: *mut f64, loaddr: *mut f64, a: __m256 #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. 
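The `loadu2`/`storeu2` helpers in this block stay `unsafe` because they dereference caller-supplied raw pointers; the change only adds `const` on top. Raw-pointer reads such as `read_unaligned` are usable in `const fn` on recent compilers, which is what makes a `const unsafe fn` of this shape workable. A self-contained sketch under that assumption, with hypothetical names:

```rust
// Sketch: a const unsafe load through raw pointers, in the spirit of the
// _mm256_loadu2_* helpers that combine two unaligned 128-bit loads.
const unsafe fn load_pair(lo: *const f64, hi: *const f64) -> [f64; 2] {
    // SAFETY: the caller must pass valid, readable pointers.
    unsafe { [lo.read_unaligned(), hi.read_unaligned()] }
}

// The same function also works at compile time.
const PAIR: [f64; 2] = {
    let lo = 1.0f64;
    let hi = 2.0f64;
    // SAFETY: both pointers come from live locals.
    unsafe { load_pair(&lo, &hi) }
};
```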
#[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_storeu2_m128i(hiaddr: *mut __m128i, loaddr: *mut __m128i, a: __m256i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_storeu2_m128i(hiaddr: *mut __m128i, loaddr: *mut __m128i, a: __m256i) { let lo = _mm256_castsi256_si128(a); _mm_storeu_si128(loaddr, lo); let hi = _mm256_extractf128_si256::<1>(a); @@ -3110,7 +3274,8 @@ pub unsafe fn _mm256_storeu2_m128i(hiaddr: *mut __m128i, loaddr: *mut __m128i, a #[target_feature(enable = "avx")] //#[cfg_attr(test, assert_instr(movss))] FIXME #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cvtss_f32(a: __m256) -> f32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtss_f32(a: __m256) -> f32 { unsafe { simd_extract!(a, 0) } } @@ -3191,6 +3356,7 @@ unsafe extern "C" { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use crate::hint::black_box; use crate::ptr; use stdarch_test::simd_test; @@ -3198,7 +3364,7 @@ mod tests { use crate::core_arch::x86::*; #[simd_test(enable = "avx")] - unsafe fn test_mm256_add_pd() { + const unsafe fn test_mm256_add_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_add_pd(a, b); @@ -3207,7 +3373,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_add_ps() { + const unsafe fn test_mm256_add_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm256_setr_ps(9., 10., 11., 12., 13., 14., 15., 16.); let r = _mm256_add_ps(a, b); @@ -3216,7 +3382,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_and_pd() { + const unsafe fn test_mm256_and_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set1_pd(0.6); let r = _mm256_and_pd(a, b); @@ -3225,7 +3391,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_and_ps() { + const unsafe fn test_mm256_and_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set1_ps(0.6); let r = _mm256_and_ps(a, b); @@ -3234,7 +3400,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_or_pd() { + const unsafe fn test_mm256_or_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set1_pd(0.6); let r = _mm256_or_pd(a, b); @@ -3243,7 +3409,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_or_ps() { + const unsafe fn test_mm256_or_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set1_ps(0.6); let r = _mm256_or_ps(a, b); @@ -3252,7 +3418,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_shuffle_pd() { + const unsafe fn test_mm256_shuffle_pd() { let a = _mm256_setr_pd(1., 4., 5., 8.); let b = _mm256_setr_pd(2., 3., 6., 7.); let r = _mm256_shuffle_pd::<0b11_11_11_11>(a, b); @@ -3261,7 +3427,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_shuffle_ps() { + const unsafe fn test_mm256_shuffle_ps() { let a = _mm256_setr_ps(1., 4., 5., 8., 9., 12., 13., 16.); let b = _mm256_setr_ps(2., 3., 6., 7., 10., 11., 14., 15.); let r = _mm256_shuffle_ps::<0b00_00_11_11>(a, b); @@ -3270,7 +3436,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_andnot_pd() { + const unsafe fn test_mm256_andnot_pd() { let a = _mm256_set1_pd(0.); let b = _mm256_set1_pd(0.6); let r = _mm256_andnot_pd(a, b); @@ -3278,7 +3444,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_andnot_ps() { + const unsafe fn test_mm256_andnot_ps() { let a = _mm256_set1_ps(0.); let b = _mm256_set1_ps(0.6); let r = 
_mm256_andnot_ps(a, b); @@ -3390,7 +3556,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_mul_pd() { + const unsafe fn test_mm256_mul_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_mul_pd(a, b); @@ -3399,7 +3565,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_mul_ps() { + const unsafe fn test_mm256_mul_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm256_setr_ps(9., 10., 11., 12., 13., 14., 15., 16.); let r = _mm256_mul_ps(a, b); @@ -3408,7 +3574,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_addsub_pd() { + const unsafe fn test_mm256_addsub_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_addsub_pd(a, b); @@ -3417,7 +3583,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_addsub_ps() { + const unsafe fn test_mm256_addsub_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 1., 2., 3., 4.); let b = _mm256_setr_ps(5., 6., 7., 8., 5., 6., 7., 8.); let r = _mm256_addsub_ps(a, b); @@ -3426,7 +3592,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_sub_pd() { + const unsafe fn test_mm256_sub_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_sub_pd(a, b); @@ -3435,7 +3601,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_sub_ps() { + const unsafe fn test_mm256_sub_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., -1., -2., -3., -4.); let b = _mm256_setr_ps(5., 6., 7., 8., 3., 2., 1., 0.); let r = _mm256_sub_ps(a, b); @@ -3458,7 +3624,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_floor_pd() { + const unsafe fn test_mm256_floor_pd() { let a = _mm256_setr_pd(1.55, 2.2, 3.99, -1.2); let result_down = _mm256_floor_pd(a); let expected_down = _mm256_setr_pd(1., 2., 3., -2.); @@ -3466,7 +3632,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_ceil_pd() { + const unsafe fn test_mm256_ceil_pd() { let a = _mm256_setr_pd(1.55, 2.2, 3.99, -1.2); let result_up = _mm256_ceil_pd(a); let expected_up = _mm256_setr_pd(2., 3., 4., -1.); @@ -3488,7 +3654,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_floor_ps() { + const unsafe fn test_mm256_floor_ps() { let a = _mm256_setr_ps(1.55, 2.2, 3.99, -1.2, 1.55, 2.2, 3.99, -1.2); let result_down = _mm256_floor_ps(a); let expected_down = _mm256_setr_ps(1., 2., 3., -2., 1., 2., 3., -2.); @@ -3496,7 +3662,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_ceil_ps() { + const unsafe fn test_mm256_ceil_ps() { let a = _mm256_setr_ps(1.55, 2.2, 3.99, -1.2, 1.55, 2.2, 3.99, -1.2); let result_up = _mm256_ceil_ps(a); let expected_up = _mm256_setr_ps(2., 3., 4., -1., 2., 3., 4., -1.); @@ -3520,7 +3686,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_div_ps() { + const unsafe fn test_mm256_div_ps() { let a = _mm256_setr_ps(4., 9., 16., 25., 4., 9., 16., 25.); let b = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); let r = _mm256_div_ps(a, b); @@ -3529,7 +3695,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_div_pd() { + const unsafe fn test_mm256_div_pd() { let a = _mm256_setr_pd(4., 9., 16., 25.); let b = _mm256_setr_pd(4., 3., 2., 5.); let r = _mm256_div_pd(a, b); @@ -3538,7 +3704,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_blend_pd() { + const unsafe fn test_mm256_blend_pd() { let a = _mm256_setr_pd(4., 9., 16., 25.); let b 
= _mm256_setr_pd(4., 3., 2., 5.); let r = _mm256_blend_pd::<0x0>(a, b); @@ -3550,7 +3716,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_blend_ps() { + const unsafe fn test_mm256_blend_ps() { let a = _mm256_setr_ps(1., 4., 5., 8., 9., 12., 13., 16.); let b = _mm256_setr_ps(2., 3., 6., 7., 10., 11., 14., 15.); let r = _mm256_blend_ps::<0x0>(a, b); @@ -3562,7 +3728,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_blendv_pd() { + const unsafe fn test_mm256_blendv_pd() { let a = _mm256_setr_pd(4., 9., 16., 25.); let b = _mm256_setr_pd(4., 3., 2., 5.); let c = _mm256_setr_pd(0., 0., !0 as f64, !0 as f64); @@ -3572,7 +3738,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_blendv_ps() { + const unsafe fn test_mm256_blendv_ps() { let a = _mm256_setr_ps(4., 9., 16., 25., 4., 9., 16., 25.); let b = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); #[rustfmt::skip] @@ -3594,7 +3760,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_hadd_pd() { + const unsafe fn test_mm256_hadd_pd() { let a = _mm256_setr_pd(4., 9., 16., 25.); let b = _mm256_setr_pd(4., 3., 2., 5.); let r = _mm256_hadd_pd(a, b); @@ -3609,7 +3775,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_hadd_ps() { + const unsafe fn test_mm256_hadd_ps() { let a = _mm256_setr_ps(4., 9., 16., 25., 4., 9., 16., 25.); let b = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); let r = _mm256_hadd_ps(a, b); @@ -3624,7 +3790,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_hsub_pd() { + const unsafe fn test_mm256_hsub_pd() { let a = _mm256_setr_pd(4., 9., 16., 25.); let b = _mm256_setr_pd(4., 3., 2., 5.); let r = _mm256_hsub_pd(a, b); @@ -3639,7 +3805,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_hsub_ps() { + const unsafe fn test_mm256_hsub_ps() { let a = _mm256_setr_ps(4., 9., 16., 25., 4., 9., 16., 25.); let b = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); let r = _mm256_hsub_ps(a, b); @@ -3654,7 +3820,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_xor_pd() { + const unsafe fn test_mm256_xor_pd() { let a = _mm256_setr_pd(4., 9., 16., 25.); let b = _mm256_set1_pd(0.); let r = _mm256_xor_pd(a, b); @@ -3662,7 +3828,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_xor_ps() { + const unsafe fn test_mm256_xor_ps() { let a = _mm256_setr_ps(4., 9., 16., 25., 4., 9., 16., 25.); let b = _mm256_set1_ps(0.); let r = _mm256_xor_ps(a, b); @@ -3728,7 +3894,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_cvtepi32_pd() { + const unsafe fn test_mm256_cvtepi32_pd() { let a = _mm_setr_epi32(4, 9, 16, 25); let r = _mm256_cvtepi32_pd(a); let e = _mm256_setr_pd(4., 9., 16., 25.); @@ -3736,7 +3902,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_cvtepi32_ps() { + const unsafe fn test_mm256_cvtepi32_ps() { let a = _mm256_setr_epi32(4, 9, 16, 25, 4, 9, 16, 25); let r = _mm256_cvtepi32_ps(a); let e = _mm256_setr_ps(4., 9., 16., 25., 4., 9., 16., 25.); @@ -3744,7 +3910,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_cvtpd_ps() { + const unsafe fn test_mm256_cvtpd_ps() { let a = _mm256_setr_pd(4., 9., 16., 25.); let r = _mm256_cvtpd_ps(a); let e = _mm_setr_ps(4., 9., 16., 25.); @@ -3760,7 +3926,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_cvtps_pd() { + const unsafe fn test_mm256_cvtps_pd() { let a = _mm_setr_ps(4., 9., 16., 25.); let r = _mm256_cvtps_pd(a); let e = _mm256_setr_pd(4., 
9., 16., 25.); @@ -3768,7 +3934,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_cvtsd_f64() { + const unsafe fn test_mm256_cvtsd_f64() { let a = _mm256_setr_pd(1., 2., 3., 4.); let r = _mm256_cvtsd_f64(a); assert_eq!(r, 1.); @@ -3799,7 +3965,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_extractf128_ps() { + const unsafe fn test_mm256_extractf128_ps() { let a = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); let r = _mm256_extractf128_ps::<0>(a); let e = _mm_setr_ps(4., 3., 2., 5.); @@ -3807,7 +3973,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_extractf128_pd() { + const unsafe fn test_mm256_extractf128_pd() { let a = _mm256_setr_pd(4., 3., 2., 5.); let r = _mm256_extractf128_pd::<0>(a); let e = _mm_setr_pd(4., 3.); @@ -3815,7 +3981,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_extractf128_si256() { + const unsafe fn test_mm256_extractf128_si256() { let a = _mm256_setr_epi64x(4, 3, 2, 5); let r = _mm256_extractf128_si256::<0>(a); let e = _mm_setr_epi64x(4, 3); @@ -3823,7 +3989,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_extract_epi32() { + const unsafe fn test_mm256_extract_epi32() { let a = _mm256_setr_epi32(-1, 1, 2, 3, 4, 5, 6, 7); let r1 = _mm256_extract_epi32::<0>(a); let r2 = _mm256_extract_epi32::<3>(a); @@ -3832,7 +3998,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_cvtsi256_si32() { + const unsafe fn test_mm256_cvtsi256_si32() { let a = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm256_cvtsi256_si32(a); assert_eq!(r, 1); @@ -3869,7 +4035,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_permute_ps() { + const unsafe fn test_mm256_permute_ps() { let a = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); let r = _mm256_permute_ps::<0x1b>(a); let e = _mm256_setr_ps(5., 2., 3., 4., 50., 64., 9., 8.); @@ -3877,7 +4043,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm_permute_ps() { + const unsafe fn test_mm_permute_ps() { let a = _mm_setr_ps(4., 3., 2., 5.); let r = _mm_permute_ps::<0x1b>(a); let e = _mm_setr_ps(5., 2., 3., 4.); @@ -3903,7 +4069,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_permute_pd() { + const unsafe fn test_mm256_permute_pd() { let a = _mm256_setr_pd(4., 3., 2., 5.); let r = _mm256_permute_pd::<5>(a); let e = _mm256_setr_pd(3., 4., 5., 2.); @@ -3911,7 +4077,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm_permute_pd() { + const unsafe fn test_mm_permute_pd() { let a = _mm_setr_pd(4., 3.); let r = _mm_permute_pd::<1>(a); let e = _mm_setr_pd(3., 4.); @@ -3919,7 +4085,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_permute2f128_ps() { + const unsafe fn test_mm256_permute2f128_ps() { let a = _mm256_setr_ps(11., 12., 13., 14., 15., 16., 17., 18.); let b = _mm256_setr_ps(21., 22., 23., 24., 25., 26., 27., 28.); let r = _mm256_permute2f128_ps::<0b0001_0011>(a, b); @@ -3933,7 +4099,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_permute2f128_pd() { + const unsafe fn test_mm256_permute2f128_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_permute2f128_pd::<0b0011_0001>(a, b); @@ -3947,7 +4113,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_permute2f128_si256() { + const unsafe fn test_mm256_permute2f128_si256() { let a = _mm256_setr_epi32(11, 12, 13, 14, 15, 16, 17, 18); let b = _mm256_setr_epi32(21, 22, 23, 24, 25, 
26, 27, 28); let r = _mm256_permute2f128_si256::<0b0010_0000>(a, b); @@ -3961,28 +4127,28 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_broadcast_ss() { + const unsafe fn test_mm256_broadcast_ss() { let r = _mm256_broadcast_ss(&3.); let e = _mm256_set1_ps(3.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] - unsafe fn test_mm_broadcast_ss() { + const unsafe fn test_mm_broadcast_ss() { let r = _mm_broadcast_ss(&3.); let e = _mm_set1_ps(3.); assert_eq_m128(r, e); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_broadcast_sd() { + const unsafe fn test_mm256_broadcast_sd() { let r = _mm256_broadcast_sd(&3.); let e = _mm256_set1_pd(3.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_broadcast_ps() { + const unsafe fn test_mm256_broadcast_ps() { let a = _mm_setr_ps(4., 3., 2., 5.); let r = _mm256_broadcast_ps(&a); let e = _mm256_setr_ps(4., 3., 2., 5., 4., 3., 2., 5.); @@ -3990,7 +4156,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_broadcast_pd() { + const unsafe fn test_mm256_broadcast_pd() { let a = _mm_setr_pd(4., 3.); let r = _mm256_broadcast_pd(&a); let e = _mm256_setr_pd(4., 3., 4., 3.); @@ -3998,7 +4164,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_insertf128_ps() { + const unsafe fn test_mm256_insertf128_ps() { let a = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); let b = _mm_setr_ps(4., 9., 16., 25.); let r = _mm256_insertf128_ps::<0>(a, b); @@ -4007,7 +4173,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_insertf128_pd() { + const unsafe fn test_mm256_insertf128_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm_setr_pd(5., 6.); let r = _mm256_insertf128_pd::<0>(a, b); @@ -4016,7 +4182,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_insertf128_si256() { + const unsafe fn test_mm256_insertf128_si256() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let b = _mm_setr_epi64x(5, 6); let r = _mm256_insertf128_si256::<0>(a, b); @@ -4025,7 +4191,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_insert_epi8() { + const unsafe fn test_mm256_insert_epi8() { #[rustfmt::skip] let a = _mm256_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, @@ -4045,7 +4211,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_insert_epi16() { + const unsafe fn test_mm256_insert_epi16() { #[rustfmt::skip] let a = _mm256_setr_epi16( 0, 1, 2, 3, 4, 5, 6, 7, @@ -4061,7 +4227,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_insert_epi32() { + const unsafe fn test_mm256_insert_epi32() { let a = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm256_insert_epi32::<7>(a, 0); let e = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 0); @@ -4069,7 +4235,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_load_pd() { + const unsafe fn test_mm256_load_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let p = ptr::addr_of!(a) as *const f64; let r = _mm256_load_pd(p); @@ -4078,7 +4244,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_store_pd() { + const unsafe fn test_mm256_store_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let mut r = _mm256_undefined_pd(); _mm256_store_pd(ptr::addr_of_mut!(r) as *mut f64, a); @@ -4086,7 +4252,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_load_ps() { + const unsafe fn test_mm256_load_ps() { let a = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); let p = ptr::addr_of!(a) as *const f32; let r = _mm256_load_ps(p); @@ -4095,7 +4261,7 
@@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_store_ps() { + const unsafe fn test_mm256_store_ps() { let a = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); let mut r = _mm256_undefined_ps(); _mm256_store_ps(ptr::addr_of_mut!(r) as *mut f32, a); @@ -4103,7 +4269,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_loadu_pd() { + const unsafe fn test_mm256_loadu_pd() { let a = &[1.0f64, 2., 3., 4.]; let p = a.as_ptr(); let r = _mm256_loadu_pd(black_box(p)); @@ -4112,7 +4278,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_storeu_pd() { + const unsafe fn test_mm256_storeu_pd() { let a = _mm256_set1_pd(9.); let mut r = _mm256_undefined_pd(); _mm256_storeu_pd(ptr::addr_of_mut!(r) as *mut f64, a); @@ -4120,7 +4286,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_loadu_ps() { + const unsafe fn test_mm256_loadu_ps() { let a = &[4., 3., 2., 5., 8., 9., 64., 50.]; let p = a.as_ptr(); let r = _mm256_loadu_ps(black_box(p)); @@ -4129,7 +4295,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_storeu_ps() { + const unsafe fn test_mm256_storeu_ps() { let a = _mm256_set1_ps(9.); let mut r = _mm256_undefined_ps(); _mm256_storeu_ps(ptr::addr_of_mut!(r) as *mut f32, a); @@ -4137,7 +4303,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_load_si256() { + const unsafe fn test_mm256_load_si256() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let p = ptr::addr_of!(a); let r = _mm256_load_si256(p); @@ -4146,7 +4312,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_store_si256() { + const unsafe fn test_mm256_store_si256() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let mut r = _mm256_undefined_si256(); _mm256_store_si256(ptr::addr_of_mut!(r), a); @@ -4154,7 +4320,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_loadu_si256() { + const unsafe fn test_mm256_loadu_si256() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let p = ptr::addr_of!(a); let r = _mm256_loadu_si256(black_box(p)); @@ -4163,7 +4329,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_storeu_si256() { + const unsafe fn test_mm256_storeu_si256() { let a = _mm256_set1_epi8(9); let mut r = _mm256_undefined_si256(); _mm256_storeu_si256(ptr::addr_of_mut!(r), a); @@ -4171,7 +4337,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_maskload_pd() { + const unsafe fn test_mm256_maskload_pd() { let a = &[1.0f64, 2., 3., 4.]; let p = a.as_ptr(); let mask = _mm256_setr_epi64x(0, !0, 0, !0); @@ -4181,7 +4347,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_maskstore_pd() { + const unsafe fn test_mm256_maskstore_pd() { let mut r = _mm256_set1_pd(0.); let mask = _mm256_setr_epi64x(0, !0, 0, !0); let a = _mm256_setr_pd(1., 2., 3., 4.); @@ -4191,7 +4357,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm_maskload_pd() { + const unsafe fn test_mm_maskload_pd() { let a = &[1.0f64, 2.]; let p = a.as_ptr(); let mask = _mm_setr_epi64x(0, !0); @@ -4201,7 +4367,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm_maskstore_pd() { + const unsafe fn test_mm_maskstore_pd() { let mut r = _mm_set1_pd(0.); let mask = _mm_setr_epi64x(0, !0); let a = _mm_setr_pd(1., 2.); @@ -4211,7 +4377,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_maskload_ps() { + const unsafe fn test_mm256_maskload_ps() { let a = &[1.0f32, 2., 3., 4., 5., 6., 7., 8.]; let p = a.as_ptr(); let mask = _mm256_setr_epi32(0, !0, 0, !0, 0, !0, 0, 
!0); @@ -4221,7 +4387,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_maskstore_ps() { + const unsafe fn test_mm256_maskstore_ps() { let mut r = _mm256_set1_ps(0.); let mask = _mm256_setr_epi32(0, !0, 0, !0, 0, !0, 0, !0); let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); @@ -4231,7 +4397,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm_maskload_ps() { + const unsafe fn test_mm_maskload_ps() { let a = &[1.0f32, 2., 3., 4.]; let p = a.as_ptr(); let mask = _mm_setr_epi32(0, !0, 0, !0); @@ -4241,7 +4407,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm_maskstore_ps() { + const unsafe fn test_mm_maskstore_ps() { let mut r = _mm_set1_ps(0.); let mask = _mm_setr_epi32(0, !0, 0, !0); let a = _mm_setr_ps(1., 2., 3., 4.); @@ -4251,7 +4417,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_movehdup_ps() { + const unsafe fn test_mm256_movehdup_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_movehdup_ps(a); let e = _mm256_setr_ps(2., 2., 4., 4., 6., 6., 8., 8.); @@ -4259,7 +4425,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_moveldup_ps() { + const unsafe fn test_mm256_moveldup_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_moveldup_ps(a); let e = _mm256_setr_ps(1., 1., 3., 3., 5., 5., 7., 7.); @@ -4267,7 +4433,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_movedup_pd() { + const unsafe fn test_mm256_movedup_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let r = _mm256_movedup_pd(a); let e = _mm256_setr_pd(1., 1., 3., 3.); @@ -4370,7 +4536,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_unpackhi_pd() { + const unsafe fn test_mm256_unpackhi_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_unpackhi_pd(a, b); @@ -4379,7 +4545,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_unpackhi_ps() { + const unsafe fn test_mm256_unpackhi_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm256_setr_ps(9., 10., 11., 12., 13., 14., 15., 16.); let r = _mm256_unpackhi_ps(a, b); @@ -4388,7 +4554,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_unpacklo_pd() { + const unsafe fn test_mm256_unpacklo_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_unpacklo_pd(a, b); @@ -4397,7 +4563,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_unpacklo_ps() { + const unsafe fn test_mm256_unpacklo_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm256_setr_ps(9., 10., 11., 12., 13., 14., 15., 16.); let r = _mm256_unpacklo_ps(a, b); @@ -4406,7 +4572,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_testz_si256() { + const unsafe fn test_mm256_testz_si256() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let b = _mm256_setr_epi64x(5, 6, 7, 8); let r = _mm256_testz_si256(a, b); @@ -4417,7 +4583,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_testc_si256() { + const unsafe fn test_mm256_testc_si256() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let b = _mm256_setr_epi64x(5, 6, 7, 8); let r = _mm256_testc_si256(a, b); @@ -4475,7 +4641,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm_testz_pd() { + const unsafe fn test_mm_testz_pd() { let a = _mm_setr_pd(1., 2.); let b = _mm_setr_pd(5., 6.); let r = _mm_testz_pd(a, b); @@ -4486,7 +4652,7 @@ mod tests { } 
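The unpack tests converted above rely on the per-128-bit-lane interleave that `_mm256_unpackhi_pd` performs: within each lane, the high element is taken from `a`, then from `b`. A compile-time sketch of that lane selection on a plain array; the helper name is illustrative:

```rust
// Sketch: _mm256_unpackhi_pd interleaves the high f64 of each 128-bit
// lane, taking from `a` then `b` within each lane.
const fn unpackhi_pd(a: [f64; 4], b: [f64; 4]) -> [f64; 4] {
    [a[1], b[1], a[3], b[3]]
}

// With the test's inputs ([1., 2., 3., 4.], [5., 6., 7., 8.]) this
// yields [2., 6., 4., 8.].
const HI: [f64; 4] = unpackhi_pd([1., 2., 3., 4.], [5., 6., 7., 8.]);
```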
#[simd_test(enable = "avx")] - unsafe fn test_mm_testc_pd() { + const unsafe fn test_mm_testc_pd() { let a = _mm_setr_pd(1., 2.); let b = _mm_setr_pd(5., 6.); let r = _mm_testc_pd(a, b); @@ -4541,7 +4707,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm_testz_ps() { + const unsafe fn test_mm_testz_ps() { let a = _mm_set1_ps(1.); let r = _mm_testz_ps(a, a); assert_eq!(r, 1); @@ -4551,7 +4717,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm_testc_ps() { + const unsafe fn test_mm_testc_ps() { let a = _mm_set1_ps(1.); let r = _mm_testc_ps(a, a); assert_eq!(r, 1); @@ -4572,51 +4738,51 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_movemask_pd() { + const unsafe fn test_mm256_movemask_pd() { let a = _mm256_setr_pd(1., -2., 3., -4.); let r = _mm256_movemask_pd(a); assert_eq!(r, 0xA); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_movemask_ps() { + const unsafe fn test_mm256_movemask_ps() { let a = _mm256_setr_ps(1., -2., 3., -4., 1., -2., 3., -4.); let r = _mm256_movemask_ps(a); assert_eq!(r, 0xAA); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_setzero_pd() { + const unsafe fn test_mm256_setzero_pd() { let r = _mm256_setzero_pd(); assert_eq_m256d(r, _mm256_set1_pd(0.)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_setzero_ps() { + const unsafe fn test_mm256_setzero_ps() { let r = _mm256_setzero_ps(); assert_eq_m256(r, _mm256_set1_ps(0.)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_setzero_si256() { + const unsafe fn test_mm256_setzero_si256() { let r = _mm256_setzero_si256(); assert_eq_m256i(r, _mm256_set1_epi8(0)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_set_pd() { + const unsafe fn test_mm256_set_pd() { let r = _mm256_set_pd(1., 2., 3., 4.); assert_eq_m256d(r, _mm256_setr_pd(4., 3., 2., 1.)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_set_ps() { + const unsafe fn test_mm256_set_ps() { let r = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); assert_eq_m256(r, _mm256_setr_ps(8., 7., 6., 5., 4., 3., 2., 1.)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_set_epi8() { + const unsafe fn test_mm256_set_epi8() { #[rustfmt::skip] let r = _mm256_set_epi8( 1, 2, 3, 4, 5, 6, 7, 8, @@ -4635,7 +4801,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_set_epi16() { + const unsafe fn test_mm256_set_epi16() { #[rustfmt::skip] let r = _mm256_set_epi16( 1, 2, 3, 4, 5, 6, 7, 8, @@ -4650,31 +4816,31 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_set_epi32() { + const unsafe fn test_mm256_set_epi32() { let r = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); assert_eq_m256i(r, _mm256_setr_epi32(8, 7, 6, 5, 4, 3, 2, 1)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_set_epi64x() { + const unsafe fn test_mm256_set_epi64x() { let r = _mm256_set_epi64x(1, 2, 3, 4); assert_eq_m256i(r, _mm256_setr_epi64x(4, 3, 2, 1)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_setr_pd() { + const unsafe fn test_mm256_setr_pd() { let r = _mm256_setr_pd(1., 2., 3., 4.); assert_eq_m256d(r, _mm256_setr_pd(1., 2., 3., 4.)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_setr_ps() { + const unsafe fn test_mm256_setr_ps() { let r = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); assert_eq_m256(r, _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_setr_epi8() { + const unsafe fn test_mm256_setr_epi8() { #[rustfmt::skip] let r = _mm256_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, @@ -4694,7 +4860,7 @@ mod tests { } 
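The movemask expectations in these tests (0xA and 0xAA) are just the packed sign bits, lowest lane first; the const implementations compute them by comparing the lane bits against zero, as the earlier hunk shows. A small compile-time sketch over i64 lane bits, with a hypothetical helper:

```rust
// Sketch: pack the sign bit of each lane, lowest lane first, the way
// _mm256_movemask_pd does via a signed compare against zero.
const fn movemask4(lanes: [i64; 4]) -> i32 {
    let mut mask = 0;
    let mut i = 0;
    while i < 4 {
        if lanes[i] < 0 {
            mask |= 1 << i;
        }
        i += 1;
    }
    mask
}

// Sign pattern of (1., -2., 3., -4.): lanes 1 and 3 are negative, so the
// result is 0b1010 == 0xA, matching test_mm256_movemask_pd above.
const M: i32 = movemask4([1, -2, 3, -4]);
```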
#[simd_test(enable = "avx")] - unsafe fn test_mm256_setr_epi16() { + const unsafe fn test_mm256_setr_epi16() { #[rustfmt::skip] let r = _mm256_setr_epi16( 1, 2, 3, 4, 5, 6, 7, 8, @@ -4709,55 +4875,55 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_setr_epi32() { + const unsafe fn test_mm256_setr_epi32() { let r = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8); assert_eq_m256i(r, _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_setr_epi64x() { + const unsafe fn test_mm256_setr_epi64x() { let r = _mm256_setr_epi64x(1, 2, 3, 4); assert_eq_m256i(r, _mm256_setr_epi64x(1, 2, 3, 4)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_set1_pd() { + const unsafe fn test_mm256_set1_pd() { let r = _mm256_set1_pd(1.); assert_eq_m256d(r, _mm256_set1_pd(1.)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_set1_ps() { + const unsafe fn test_mm256_set1_ps() { let r = _mm256_set1_ps(1.); assert_eq_m256(r, _mm256_set1_ps(1.)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_set1_epi8() { + const unsafe fn test_mm256_set1_epi8() { let r = _mm256_set1_epi8(1); assert_eq_m256i(r, _mm256_set1_epi8(1)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_set1_epi16() { + const unsafe fn test_mm256_set1_epi16() { let r = _mm256_set1_epi16(1); assert_eq_m256i(r, _mm256_set1_epi16(1)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_set1_epi32() { + const unsafe fn test_mm256_set1_epi32() { let r = _mm256_set1_epi32(1); assert_eq_m256i(r, _mm256_set1_epi32(1)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_set1_epi64x() { + const unsafe fn test_mm256_set1_epi64x() { let r = _mm256_set1_epi64x(1); assert_eq_m256i(r, _mm256_set1_epi64x(1)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_castpd_ps() { + const unsafe fn test_mm256_castpd_ps() { let a = _mm256_setr_pd(1., 2., 3., 4.); let r = _mm256_castpd_ps(a); let e = _mm256_setr_ps(0., 1.875, 0., 2., 0., 2.125, 0., 2.25); @@ -4765,7 +4931,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_castps_pd() { + const unsafe fn test_mm256_castps_pd() { let a = _mm256_setr_ps(0., 1.875, 0., 2., 0., 2.125, 0., 2.25); let r = _mm256_castps_pd(a); let e = _mm256_setr_pd(1., 2., 3., 4.); @@ -4773,7 +4939,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_castps_si256() { + const unsafe fn test_mm256_castps_si256() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_castps_si256(a); #[rustfmt::skip] @@ -4787,7 +4953,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_castsi256_ps() { + const unsafe fn test_mm256_castsi256_ps() { #[rustfmt::skip] let a = _mm256_setr_epi8( 0, 0, -128, 63, 0, 0, 0, 64, @@ -4801,63 +4967,63 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_castpd_si256() { + const unsafe fn test_mm256_castpd_si256() { let a = _mm256_setr_pd(1., 2., 3., 4.); let r = _mm256_castpd_si256(a); assert_eq_m256d(transmute(r), a); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_castsi256_pd() { + const unsafe fn test_mm256_castsi256_pd() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let r = _mm256_castsi256_pd(a); assert_eq_m256d(r, transmute(a)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_castps256_ps128() { + const unsafe fn test_mm256_castps256_ps128() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_castps256_ps128(a); assert_eq_m128(r, _mm_setr_ps(1., 2., 3., 4.)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_castpd256_pd128() 
{ + const unsafe fn test_mm256_castpd256_pd128() { let a = _mm256_setr_pd(1., 2., 3., 4.); let r = _mm256_castpd256_pd128(a); assert_eq_m128d(r, _mm_setr_pd(1., 2.)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_castsi256_si128() { + const unsafe fn test_mm256_castsi256_si128() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let r = _mm256_castsi256_si128(a); assert_eq_m128i(r, _mm_setr_epi64x(1, 2)); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_castps128_ps256() { + const unsafe fn test_mm256_castps128_ps256() { let a = _mm_setr_ps(1., 2., 3., 4.); let r = _mm256_castps128_ps256(a); assert_eq_m128(_mm256_castps256_ps128(r), a); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_castpd128_pd256() { + const unsafe fn test_mm256_castpd128_pd256() { let a = _mm_setr_pd(1., 2.); let r = _mm256_castpd128_pd256(a); assert_eq_m128d(_mm256_castpd256_pd128(r), a); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_castsi128_si256() { + const unsafe fn test_mm256_castsi128_si256() { let a = _mm_setr_epi32(1, 2, 3, 4); let r = _mm256_castsi128_si256(a); assert_eq_m128i(_mm256_castsi256_si128(r), a); } #[simd_test(enable = "avx")] - unsafe fn test_mm256_zextps128_ps256() { + const unsafe fn test_mm256_zextps128_ps256() { let a = _mm_setr_ps(1., 2., 3., 4.); let r = _mm256_zextps128_ps256(a); let e = _mm256_setr_ps(1., 2., 3., 4., 0., 0., 0., 0.); @@ -4865,7 +5031,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_zextsi128_si256() { + const unsafe fn test_mm256_zextsi128_si256() { let a = _mm_setr_epi64x(1, 2); let r = _mm256_zextsi128_si256(a); let e = _mm256_setr_epi64x(1, 2, 0, 0); @@ -4873,7 +5039,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_zextpd128_pd256() { + const unsafe fn test_mm256_zextpd128_pd256() { let a = _mm_setr_pd(1., 2.); let r = _mm256_zextpd128_pd256(a); let e = _mm256_setr_pd(1., 2., 0., 0.); @@ -4881,7 +5047,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_set_m128() { + const unsafe fn test_mm256_set_m128() { let hi = _mm_setr_ps(5., 6., 7., 8.); let lo = _mm_setr_ps(1., 2., 3., 4.); let r = _mm256_set_m128(hi, lo); @@ -4890,7 +5056,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_set_m128d() { + const unsafe fn test_mm256_set_m128d() { let hi = _mm_setr_pd(3., 4.); let lo = _mm_setr_pd(1., 2.); let r = _mm256_set_m128d(hi, lo); @@ -4899,7 +5065,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_set_m128i() { + const unsafe fn test_mm256_set_m128i() { #[rustfmt::skip] let hi = _mm_setr_epi8( 17, 18, 19, 20, @@ -4926,7 +5092,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_setr_m128() { + const unsafe fn test_mm256_setr_m128() { let lo = _mm_setr_ps(1., 2., 3., 4.); let hi = _mm_setr_ps(5., 6., 7., 8.); let r = _mm256_setr_m128(lo, hi); @@ -4935,7 +5101,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_setr_m128d() { + const unsafe fn test_mm256_setr_m128d() { let lo = _mm_setr_pd(1., 2.); let hi = _mm_setr_pd(3., 4.); let r = _mm256_setr_m128d(lo, hi); @@ -4944,7 +5110,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_setr_m128i() { + const unsafe fn test_mm256_setr_m128i() { #[rustfmt::skip] let lo = _mm_setr_epi8( 1, 2, 3, 4, @@ -4969,7 +5135,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_loadu2_m128() { + const unsafe fn test_mm256_loadu2_m128() { let hi = &[5., 6., 7., 8.]; let hiaddr = hi.as_ptr(); let lo = &[1., 2., 3., 4.]; @@ -4980,7 +5146,7 @@ mod tests { 
} #[simd_test(enable = "avx")] - unsafe fn test_mm256_loadu2_m128d() { + const unsafe fn test_mm256_loadu2_m128d() { let hi = &[3., 4.]; let hiaddr = hi.as_ptr(); let lo = &[1., 2.]; @@ -4991,7 +5157,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_loadu2_m128i() { + const unsafe fn test_mm256_loadu2_m128i() { #[rustfmt::skip] let hi = _mm_setr_epi8( 17, 18, 19, 20, 21, 22, 23, 24, @@ -5014,7 +5180,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_storeu2_m128() { + const unsafe fn test_mm256_storeu2_m128() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let mut hi = _mm_undefined_ps(); let mut lo = _mm_undefined_ps(); @@ -5028,7 +5194,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_storeu2_m128d() { + const unsafe fn test_mm256_storeu2_m128d() { let a = _mm256_setr_pd(1., 2., 3., 4.); let mut hi = _mm_undefined_pd(); let mut lo = _mm_undefined_pd(); @@ -5042,7 +5208,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_storeu2_m128i() { + const unsafe fn test_mm256_storeu2_m128i() { #[rustfmt::skip] let a = _mm256_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, @@ -5069,7 +5235,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_cvtss_f32() { + const unsafe fn test_mm256_cvtss_f32() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_cvtss_f32(a); assert_eq!(r, 1.); diff --git a/crates/core_arch/src/x86/avx2.rs b/crates/core_arch/src/x86/avx2.rs index 3734915b70..b3b2accd73 100644 --- a/crates/core_arch/src/x86/avx2.rs +++ b/crates/core_arch/src/x86/avx2.rs @@ -31,7 +31,8 @@ use stdarch_test::assert_instr; #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpabsd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_abs_epi32(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_abs_epi32(a: __m256i) -> __m256i { unsafe { let a = a.as_i32x8(); let r = simd_select::(simd_lt(a, i32x8::ZERO), simd_neg(a), a); @@ -46,7 +47,8 @@ pub fn _mm256_abs_epi32(a: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpabsw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_abs_epi16(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_abs_epi16(a: __m256i) -> __m256i { unsafe { let a = a.as_i16x16(); let r = simd_select::(simd_lt(a, i16x16::ZERO), simd_neg(a), a); @@ -61,7 +63,8 @@ pub fn _mm256_abs_epi16(a: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpabsb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_abs_epi8(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_abs_epi8(a: __m256i) -> __m256i { unsafe { let a = a.as_i8x32(); let r = simd_select::(simd_lt(a, i8x32::ZERO), simd_neg(a), a); @@ -76,7 +79,8 @@ pub fn _mm256_abs_epi8(a: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpaddq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_add_epi64(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_add_epi64(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_add(a.as_i64x4(), b.as_i64x4())) } } @@ -87,7 +91,8 @@ pub fn _mm256_add_epi64(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = 
"avx2")] #[cfg_attr(test, assert_instr(vpaddd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_add_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_add_epi32(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_add(a.as_i32x8(), b.as_i32x8())) } } @@ -98,7 +103,8 @@ pub fn _mm256_add_epi32(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpaddw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_add_epi16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_add_epi16(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_add(a.as_i16x16(), b.as_i16x16())) } } @@ -109,7 +115,8 @@ pub fn _mm256_add_epi16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpaddb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_add_epi8(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_add_epi8(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_add(a.as_i8x32(), b.as_i8x32())) } } @@ -120,7 +127,8 @@ pub fn _mm256_add_epi8(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpaddsb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_adds_epi8(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_adds_epi8(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_saturating_add(a.as_i8x32(), b.as_i8x32())) } } @@ -131,7 +139,8 @@ pub fn _mm256_adds_epi8(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpaddsw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_adds_epi16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_adds_epi16(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_saturating_add(a.as_i16x16(), b.as_i16x16())) } } @@ -142,7 +151,8 @@ pub fn _mm256_adds_epi16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpaddusb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_adds_epu8(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_adds_epu8(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_saturating_add(a.as_u8x32(), b.as_u8x32())) } } @@ -153,7 +163,8 @@ pub fn _mm256_adds_epu8(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpaddusw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_adds_epu16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_adds_epu16(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_saturating_add(a.as_u16x16(), b.as_u16x16())) } } @@ -166,7 +177,8 @@ pub fn _mm256_adds_epu16(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 7))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_alignr_epi8(a: __m256i, b: __m256i) -> __m256i { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_alignr_epi8(a: __m256i, b: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); // If palignr is shifting the pair of vectors more than the size of two @@ -247,7 +259,8 @@ pub fn _mm256_alignr_epi8(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vandps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_and_si256(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_and_si256(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_and(a.as_i64x4(), b.as_i64x4())) } } @@ -259,7 +272,8 @@ pub fn _mm256_and_si256(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vandnps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_andnot_si256(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_andnot_si256(a: __m256i, b: __m256i) -> __m256i { unsafe { let all_ones = _mm256_set1_epi8(-1); transmute(simd_and( @@ -276,7 +290,8 @@ pub fn _mm256_andnot_si256(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpavgw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_avg_epu16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_avg_epu16(a: __m256i, b: __m256i) -> __m256i { unsafe { let a = simd_cast::<_, u32x16>(a.as_u16x16()); let b = simd_cast::<_, u32x16>(b.as_u16x16()); @@ -292,7 +307,8 @@ pub fn _mm256_avg_epu16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpavgb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_avg_epu8(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_avg_epu8(a: __m256i, b: __m256i) -> __m256i { unsafe { let a = simd_cast::<_, u16x32>(a.as_u8x32()); let b = simd_cast::<_, u16x32>(b.as_u8x32()); @@ -309,7 +325,8 @@ pub fn _mm256_avg_epu8(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vblendps, IMM4 = 9))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_blend_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_blend_epi32(a: __m128i, b: __m128i) -> __m128i { static_assert_uimm_bits!(IMM4, 4); unsafe { let a = a.as_i32x4(); @@ -336,7 +353,8 @@ pub fn _mm_blend_epi32(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(vblendps, IMM8 = 9))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_blend_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_blend_epi32(a: __m256i, b: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); unsafe { let a = a.as_i32x8(); @@ -367,7 +385,8 @@ pub fn _mm256_blend_epi32(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpblendw, IMM8 = 9))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_blend_epi16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = 
"149298")] +pub const fn _mm256_blend_epi16(a: __m256i, b: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); unsafe { let a = a.as_i16x16(); @@ -406,7 +425,8 @@ pub fn _mm256_blend_epi16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpblendvb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_blendv_epi8(a: __m256i, b: __m256i, mask: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_blendv_epi8(a: __m256i, b: __m256i, mask: __m256i) -> __m256i { unsafe { let mask: i8x32 = simd_lt(mask.as_i8x32(), i8x32::ZERO); transmute(simd_select(mask, b.as_i8x32(), a.as_i8x32())) @@ -421,7 +441,8 @@ pub fn _mm256_blendv_epi8(a: __m256i, b: __m256i, mask: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpbroadcastb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_broadcastb_epi8(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_broadcastb_epi8(a: __m128i) -> __m128i { unsafe { let ret = simd_shuffle!(a.as_i8x16(), i8x16::ZERO, [0_u32; 16]); transmute::(ret) @@ -436,7 +457,8 @@ pub fn _mm_broadcastb_epi8(a: __m128i) -> __m128i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpbroadcastb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_broadcastb_epi8(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_broadcastb_epi8(a: __m128i) -> __m256i { unsafe { let ret = simd_shuffle!(a.as_i8x16(), i8x16::ZERO, [0_u32; 32]); transmute::(ret) @@ -453,7 +475,8 @@ pub fn _mm256_broadcastb_epi8(a: __m128i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vbroadcastss))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_broadcastd_epi32(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_broadcastd_epi32(a: __m128i) -> __m128i { unsafe { let ret = simd_shuffle!(a.as_i32x4(), i32x4::ZERO, [0_u32; 4]); transmute::(ret) @@ -470,7 +493,8 @@ pub fn _mm_broadcastd_epi32(a: __m128i) -> __m128i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vbroadcastss))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_broadcastd_epi32(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_broadcastd_epi32(a: __m128i) -> __m256i { unsafe { let ret = simd_shuffle!(a.as_i32x4(), i32x4::ZERO, [0_u32; 8]); transmute::(ret) @@ -487,7 +511,8 @@ pub fn _mm256_broadcastd_epi32(a: __m128i) -> __m256i { // See https://github.com/rust-lang/stdarch/issues/791 #[cfg_attr(test, assert_instr(vmovddup))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_broadcastq_epi64(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_broadcastq_epi64(a: __m128i) -> __m128i { unsafe { let ret = simd_shuffle!(a.as_i64x2(), a.as_i64x2(), [0_u32; 2]); transmute::(ret) @@ -502,7 +527,8 @@ pub fn _mm_broadcastq_epi64(a: __m128i) -> __m128i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vbroadcastsd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_broadcastq_epi64(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn 
_mm256_broadcastq_epi64(a: __m128i) -> __m256i { unsafe { let ret = simd_shuffle!(a.as_i64x2(), a.as_i64x2(), [0_u32; 4]); transmute::(ret) @@ -517,7 +543,8 @@ pub fn _mm256_broadcastq_epi64(a: __m128i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vmovddup))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_broadcastsd_pd(a: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_broadcastsd_pd(a: __m128d) -> __m128d { unsafe { simd_shuffle!(a, _mm_setzero_pd(), [0_u32; 2]) } } @@ -529,7 +556,8 @@ pub fn _mm_broadcastsd_pd(a: __m128d) -> __m128d { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vbroadcastsd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_broadcastsd_pd(a: __m128d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_broadcastsd_pd(a: __m128d) -> __m256d { unsafe { simd_shuffle!(a, _mm_setzero_pd(), [0_u32; 4]) } } @@ -540,7 +568,8 @@ pub fn _mm256_broadcastsd_pd(a: __m128d) -> __m256d { #[inline] #[target_feature(enable = "avx2")] #[stable(feature = "simd_x86_updates", since = "1.82.0")] -pub fn _mm_broadcastsi128_si256(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_broadcastsi128_si256(a: __m128i) -> __m256i { unsafe { let ret = simd_shuffle!(a.as_i64x2(), i64x2::ZERO, [0, 1, 0, 1]); transmute::(ret) @@ -556,7 +585,8 @@ pub fn _mm_broadcastsi128_si256(a: __m128i) -> __m256i { #[inline] #[target_feature(enable = "avx2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_broadcastsi128_si256(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_broadcastsi128_si256(a: __m128i) -> __m256i { unsafe { let ret = simd_shuffle!(a.as_i64x2(), i64x2::ZERO, [0, 1, 0, 1]); transmute::(ret) @@ -571,7 +601,8 @@ pub fn _mm256_broadcastsi128_si256(a: __m128i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vbroadcastss))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_broadcastss_ps(a: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_broadcastss_ps(a: __m128) -> __m128 { unsafe { simd_shuffle!(a, _mm_setzero_ps(), [0_u32; 4]) } } @@ -583,7 +614,8 @@ pub fn _mm_broadcastss_ps(a: __m128) -> __m128 { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vbroadcastss))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_broadcastss_ps(a: __m128) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_broadcastss_ps(a: __m128) -> __m256 { unsafe { simd_shuffle!(a, _mm_setzero_ps(), [0_u32; 8]) } } @@ -595,7 +627,8 @@ pub fn _mm256_broadcastss_ps(a: __m128) -> __m256 { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpbroadcastw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_broadcastw_epi16(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_broadcastw_epi16(a: __m128i) -> __m128i { unsafe { let ret = simd_shuffle!(a.as_i16x8(), i16x8::ZERO, [0_u32; 8]); transmute::(ret) @@ -610,7 +643,8 @@ pub fn _mm_broadcastw_epi16(a: __m128i) -> __m128i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpbroadcastw))] #[stable(feature = "simd_x86", since = "1.27.0")] 
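// The broadcast intrinsics in this stretch all boil down to a `simd_shuffle!`
// that repeats lane 0 of the source (or, for `*_broadcastsi128_si256`, its low
// 128 bits), which is why they can be marked `const fn` without any change to
// their bodies.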
-pub fn _mm256_broadcastw_epi16(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_broadcastw_epi16(a: __m128i) -> __m256i { unsafe { let ret = simd_shuffle!(a.as_i16x8(), i16x8::ZERO, [0_u32; 16]); transmute::(ret) @@ -624,7 +658,8 @@ pub fn _mm256_broadcastw_epi16(a: __m128i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpcmpeqq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cmpeq_epi64(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpeq_epi64(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute::(simd_eq(a.as_i64x4(), b.as_i64x4())) } } @@ -635,7 +670,8 @@ pub fn _mm256_cmpeq_epi64(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpcmpeqd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cmpeq_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpeq_epi32(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute::(simd_eq(a.as_i32x8(), b.as_i32x8())) } } @@ -646,7 +682,8 @@ pub fn _mm256_cmpeq_epi32(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpcmpeqw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cmpeq_epi16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpeq_epi16(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute::(simd_eq(a.as_i16x16(), b.as_i16x16())) } } @@ -657,7 +694,8 @@ pub fn _mm256_cmpeq_epi16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpcmpeqb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cmpeq_epi8(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpeq_epi8(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute::(simd_eq(a.as_i8x32(), b.as_i8x32())) } } @@ -668,7 +706,8 @@ pub fn _mm256_cmpeq_epi8(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpcmpgtq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cmpgt_epi64(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpgt_epi64(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute::(simd_gt(a.as_i64x4(), b.as_i64x4())) } } @@ -679,7 +718,8 @@ pub fn _mm256_cmpgt_epi64(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpcmpgtd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cmpgt_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpgt_epi32(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute::(simd_gt(a.as_i32x8(), b.as_i32x8())) } } @@ -690,7 +730,8 @@ pub fn _mm256_cmpgt_epi32(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpcmpgtw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cmpgt_epi16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpgt_epi16(a: __m256i, b: 
__m256i) -> __m256i { unsafe { transmute::(simd_gt(a.as_i16x16(), b.as_i16x16())) } } @@ -701,7 +742,8 @@ pub fn _mm256_cmpgt_epi16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpcmpgtb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cmpgt_epi8(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpgt_epi8(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute::(simd_gt(a.as_i8x32(), b.as_i8x32())) } } @@ -712,7 +754,8 @@ pub fn _mm256_cmpgt_epi8(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmovsxwd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cvtepi16_epi32(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtepi16_epi32(a: __m128i) -> __m256i { unsafe { transmute::(simd_cast(a.as_i16x8())) } } @@ -723,7 +766,8 @@ pub fn _mm256_cvtepi16_epi32(a: __m128i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmovsxwq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cvtepi16_epi64(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtepi16_epi64(a: __m128i) -> __m256i { unsafe { let a = a.as_i16x8(); let v64: i16x4 = simd_shuffle!(a, a, [0, 1, 2, 3]); @@ -738,7 +782,8 @@ pub fn _mm256_cvtepi16_epi64(a: __m128i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmovsxdq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cvtepi32_epi64(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtepi32_epi64(a: __m128i) -> __m256i { unsafe { transmute::(simd_cast(a.as_i32x4())) } } @@ -749,7 +794,8 @@ pub fn _mm256_cvtepi32_epi64(a: __m128i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmovsxbw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cvtepi8_epi16(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtepi8_epi16(a: __m128i) -> __m256i { unsafe { transmute::(simd_cast(a.as_i8x16())) } } @@ -760,7 +806,8 @@ pub fn _mm256_cvtepi8_epi16(a: __m128i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmovsxbd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cvtepi8_epi32(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtepi8_epi32(a: __m128i) -> __m256i { unsafe { let a = a.as_i8x16(); let v64: i8x8 = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); @@ -775,7 +822,8 @@ pub fn _mm256_cvtepi8_epi32(a: __m128i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmovsxbq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cvtepi8_epi64(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtepi8_epi64(a: __m128i) -> __m256i { unsafe { let a = a.as_i8x16(); let v32: i8x4 = simd_shuffle!(a, a, [0, 1, 2, 3]); @@ -791,7 +839,8 @@ pub fn _mm256_cvtepi8_epi64(a: __m128i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmovzxwd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn 
_mm256_cvtepu16_epi32(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtepu16_epi32(a: __m128i) -> __m256i { unsafe { transmute::(simd_cast(a.as_u16x8())) } } @@ -803,7 +852,8 @@ pub fn _mm256_cvtepu16_epi32(a: __m128i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmovzxwq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cvtepu16_epi64(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtepu16_epi64(a: __m128i) -> __m256i { unsafe { let a = a.as_u16x8(); let v64: u16x4 = simd_shuffle!(a, a, [0, 1, 2, 3]); @@ -818,7 +868,8 @@ pub fn _mm256_cvtepu16_epi64(a: __m128i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmovzxdq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cvtepu32_epi64(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtepu32_epi64(a: __m128i) -> __m256i { unsafe { transmute::(simd_cast(a.as_u32x4())) } } @@ -829,7 +880,8 @@ pub fn _mm256_cvtepu32_epi64(a: __m128i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmovzxbw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cvtepu8_epi16(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtepu8_epi16(a: __m128i) -> __m256i { unsafe { transmute::(simd_cast(a.as_u8x16())) } } @@ -841,7 +893,8 @@ pub fn _mm256_cvtepu8_epi16(a: __m128i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmovzxbd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cvtepu8_epi32(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtepu8_epi32(a: __m128i) -> __m256i { unsafe { let a = a.as_u8x16(); let v64: u8x8 = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); @@ -857,7 +910,8 @@ pub fn _mm256_cvtepu8_epi32(a: __m128i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmovzxbq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_cvtepu8_epi64(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtepu8_epi64(a: __m128i) -> __m256i { unsafe { let a = a.as_u8x16(); let v32: u8x4 = simd_shuffle!(a, a, [0, 1, 2, 3]); @@ -873,7 +927,8 @@ pub fn _mm256_cvtepu8_epi64(a: __m128i) -> __m256i { #[cfg_attr(test, assert_instr(vextractf128, IMM1 = 1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_extracti128_si256(a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_extracti128_si256(a: __m256i) -> __m128i { static_assert_uimm_bits!(IMM1, 1); unsafe { let a = a.as_i64x4(); @@ -890,7 +945,8 @@ pub fn _mm256_extracti128_si256(a: __m256i) -> __m128i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vphaddw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_hadd_epi16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_hadd_epi16(a: __m256i, b: __m256i) -> __m256i { let a = a.as_i16x16(); let b = b.as_i16x16(); unsafe { @@ -915,7 +971,8 @@ pub fn _mm256_hadd_epi16(a: __m256i, b: __m256i) -> 
__m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vphaddd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_hadd_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_hadd_epi32(a: __m256i, b: __m256i) -> __m256i { let a = a.as_i32x8(); let b = b.as_i32x8(); unsafe { @@ -944,7 +1001,8 @@ pub fn _mm256_hadds_epi16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vphsubw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_hsub_epi16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_hsub_epi16(a: __m256i, b: __m256i) -> __m256i { let a = a.as_i16x16(); let b = b.as_i16x16(); unsafe { @@ -969,7 +1027,8 @@ pub fn _mm256_hsub_epi16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vphsubd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_hsub_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_hsub_epi32(a: __m256i, b: __m256i) -> __m256i { let a = a.as_i32x8(); let b = b.as_i32x8(); unsafe { @@ -1734,7 +1793,8 @@ pub unsafe fn _mm256_mask_i64gather_pd( #[cfg_attr(test, assert_instr(vinsertf128, IMM1 = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_inserti128_si256(a: __m256i, b: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_inserti128_si256(a: __m256i, b: __m128i) -> __m256i { static_assert_uimm_bits!(IMM1, 1); unsafe { let a = a.as_i64x4(); @@ -1753,7 +1813,8 @@ pub fn _mm256_inserti128_si256(a: __m256i, b: __m128i) -> __m25 #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmaddwd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_madd_epi16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_madd_epi16(a: __m256i, b: __m256i) -> __m256i { unsafe { let r: i32x16 = simd_mul(simd_cast(a.as_i16x16()), simd_cast(b.as_i16x16())); let even: i32x8 = simd_shuffle!(r, r, [0, 2, 4, 6, 8, 10, 12, 14]); @@ -1785,7 +1846,8 @@ pub fn _mm256_maddubs_epi16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmaskmovd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_maskload_epi32(mem_addr: *const i32, mask: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_maskload_epi32(mem_addr: *const i32, mask: __m128i) -> __m128i { let mask = simd_shr(mask.as_i32x4(), i32x4::splat(31)); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, i32x4::ZERO).as_m128i() } @@ -1799,7 +1861,8 @@ pub unsafe fn _mm_maskload_epi32(mem_addr: *const i32, mask: __m128i) -> __m128i #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmaskmovd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_maskload_epi32(mem_addr: *const i32, mask: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_maskload_epi32(mem_addr: *const i32, mask: __m256i) -> __m256i { let mask = simd_shr(mask.as_i32x8(), i32x8::splat(31)); 
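// The arithmetic shift by 31 smears each lane's sign bit across the whole lane,
// giving -1 (all ones) for enabled lanes and 0 for disabled ones; the masked
// load below then reads only the enabled lanes and substitutes zero for the rest.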
simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, i32x8::ZERO).as_m256i() } @@ -1813,7 +1876,8 @@ pub unsafe fn _mm256_maskload_epi32(mem_addr: *const i32, mask: __m256i) -> __m2 #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmaskmovq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_maskload_epi64(mem_addr: *const i64, mask: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_maskload_epi64(mem_addr: *const i64, mask: __m128i) -> __m128i { let mask = simd_shr(mask.as_i64x2(), i64x2::splat(63)); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, i64x2::ZERO).as_m128i() } @@ -1827,7 +1891,8 @@ pub unsafe fn _mm_maskload_epi64(mem_addr: *const i64, mask: __m128i) -> __m128i #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmaskmovq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_maskload_epi64(mem_addr: *const i64, mask: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_maskload_epi64(mem_addr: *const i64, mask: __m256i) -> __m256i { let mask = simd_shr(mask.as_i64x4(), i64x4::splat(63)); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, i64x4::ZERO).as_m256i() } @@ -1841,7 +1906,8 @@ pub unsafe fn _mm256_maskload_epi64(mem_addr: *const i64, mask: __m256i) -> __m2 #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmaskmovd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_maskstore_epi32(mem_addr: *mut i32, mask: __m128i, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_maskstore_epi32(mem_addr: *mut i32, mask: __m128i, a: __m128i) { let mask = simd_shr(mask.as_i32x4(), i32x4::splat(31)); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_i32x4()) } @@ -1855,7 +1921,8 @@ pub unsafe fn _mm_maskstore_epi32(mem_addr: *mut i32, mask: __m128i, a: __m128i) #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmaskmovd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_maskstore_epi32(mem_addr: *mut i32, mask: __m256i, a: __m256i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_maskstore_epi32(mem_addr: *mut i32, mask: __m256i, a: __m256i) { let mask = simd_shr(mask.as_i32x8(), i32x8::splat(31)); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_i32x8()) } @@ -1869,7 +1936,8 @@ pub unsafe fn _mm256_maskstore_epi32(mem_addr: *mut i32, mask: __m256i, a: __m25 #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmaskmovq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_maskstore_epi64(mem_addr: *mut i64, mask: __m128i, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_maskstore_epi64(mem_addr: *mut i64, mask: __m128i, a: __m128i) { let mask = simd_shr(mask.as_i64x2(), i64x2::splat(63)); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_i64x2()) } @@ -1883,7 +1951,8 @@ pub unsafe fn _mm_maskstore_epi64(mem_addr: *mut i64, mask: __m128i, a: __m128i) #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmaskmovq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm256_maskstore_epi64(mem_addr: *mut i64, mask: __m256i, a: __m256i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub 
const unsafe fn _mm256_maskstore_epi64(mem_addr: *mut i64, mask: __m256i, a: __m256i) { let mask = simd_shr(mask.as_i64x4(), i64x4::splat(63)); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_i64x4()) } @@ -1896,7 +1965,8 @@ pub unsafe fn _mm256_maskstore_epi64(mem_addr: *mut i64, mask: __m256i, a: __m25 #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmaxsw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_max_epi16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_max_epi16(a: __m256i, b: __m256i) -> __m256i { unsafe { simd_imax(a.as_i16x16(), b.as_i16x16()).as_m256i() } } @@ -1908,7 +1978,8 @@ pub fn _mm256_max_epi16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmaxsd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_max_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_max_epi32(a: __m256i, b: __m256i) -> __m256i { unsafe { simd_imax(a.as_i32x8(), b.as_i32x8()).as_m256i() } } @@ -1920,7 +1991,8 @@ pub fn _mm256_max_epi32(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmaxsb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_max_epi8(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_max_epi8(a: __m256i, b: __m256i) -> __m256i { unsafe { simd_imax(a.as_i8x32(), b.as_i8x32()).as_m256i() } } @@ -1932,7 +2004,8 @@ pub fn _mm256_max_epi8(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmaxuw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_max_epu16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_max_epu16(a: __m256i, b: __m256i) -> __m256i { unsafe { simd_imax(a.as_u16x16(), b.as_u16x16()).as_m256i() } } @@ -1944,7 +2017,8 @@ pub fn _mm256_max_epu16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmaxud))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_max_epu32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_max_epu32(a: __m256i, b: __m256i) -> __m256i { unsafe { simd_imax(a.as_u32x8(), b.as_u32x8()).as_m256i() } } @@ -1956,7 +2030,8 @@ pub fn _mm256_max_epu32(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmaxub))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_max_epu8(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_max_epu8(a: __m256i, b: __m256i) -> __m256i { unsafe { simd_imax(a.as_u8x32(), b.as_u8x32()).as_m256i() } } @@ -1968,7 +2043,8 @@ pub fn _mm256_max_epu8(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpminsw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_min_epi16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_min_epi16(a: __m256i, b: __m256i) -> __m256i { unsafe { simd_imin(a.as_i16x16(), 
b.as_i16x16()).as_m256i() } } @@ -1980,7 +2056,8 @@ pub fn _mm256_min_epi16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpminsd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_min_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_min_epi32(a: __m256i, b: __m256i) -> __m256i { unsafe { simd_imin(a.as_i32x8(), b.as_i32x8()).as_m256i() } } @@ -1992,7 +2069,8 @@ pub fn _mm256_min_epi32(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpminsb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_min_epi8(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_min_epi8(a: __m256i, b: __m256i) -> __m256i { unsafe { simd_imin(a.as_i8x32(), b.as_i8x32()).as_m256i() } } @@ -2004,7 +2082,8 @@ pub fn _mm256_min_epi8(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpminuw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_min_epu16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_min_epu16(a: __m256i, b: __m256i) -> __m256i { unsafe { simd_imin(a.as_u16x16(), b.as_u16x16()).as_m256i() } } @@ -2016,7 +2095,8 @@ pub fn _mm256_min_epu16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpminud))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_min_epu32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_min_epu32(a: __m256i, b: __m256i) -> __m256i { unsafe { simd_imin(a.as_u32x8(), b.as_u32x8()).as_m256i() } } @@ -2028,7 +2108,8 @@ pub fn _mm256_min_epu32(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpminub))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_min_epu8(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_min_epu8(a: __m256i, b: __m256i) -> __m256i { unsafe { simd_imin(a.as_u8x32(), b.as_u8x32()).as_m256i() } } @@ -2040,7 +2121,8 @@ pub fn _mm256_min_epu8(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmovmskb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_movemask_epi8(a: __m256i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_movemask_epi8(a: __m256i) -> i32 { unsafe { let z = i8x32::ZERO; let m: i8x32 = simd_lt(a.as_i8x32(), z); @@ -2077,7 +2159,8 @@ pub fn _mm256_mpsadbw_epu8(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmuldq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_mul_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mul_epi32(a: __m256i, b: __m256i) -> __m256i { unsafe { let a = simd_cast::<_, i64x4>(simd_cast::<_, i32x4>(a.as_i64x4())); let b = simd_cast::<_, i64x4>(simd_cast::<_, i32x4>(b.as_i64x4())); @@ -2095,11 +2178,12 @@ pub fn _mm256_mul_epi32(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] 
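// The unsigned multiply below first masks every 64-bit lane down to its low
// 32 bits before the full-width multiply, matching what `vpmuludq` reads from
// the even dword of each qword.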
#[cfg_attr(test, assert_instr(vpmuludq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_mul_epu32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mul_epu32(a: __m256i, b: __m256i) -> __m256i { unsafe { let a = a.as_u64x4(); let b = b.as_u64x4(); - let mask = u64x4::splat(u32::MAX.into()); + let mask = u64x4::splat(u32::MAX as u64); transmute(simd_mul(simd_and(a, mask), simd_and(b, mask))) } } @@ -2113,7 +2197,8 @@ pub fn _mm256_mul_epu32(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmulhw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_mulhi_epi16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mulhi_epi16(a: __m256i, b: __m256i) -> __m256i { unsafe { let a = simd_cast::<_, i32x16>(a.as_i16x16()); let b = simd_cast::<_, i32x16>(b.as_i16x16()); @@ -2131,7 +2216,8 @@ pub fn _mm256_mulhi_epi16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmulhuw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_mulhi_epu16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mulhi_epu16(a: __m256i, b: __m256i) -> __m256i { unsafe { let a = simd_cast::<_, u32x16>(a.as_u16x16()); let b = simd_cast::<_, u32x16>(b.as_u16x16()); @@ -2149,7 +2235,8 @@ pub fn _mm256_mulhi_epu16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmullw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_mullo_epi16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mullo_epi16(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_mul(a.as_i16x16(), b.as_i16x16())) } } @@ -2162,7 +2249,8 @@ pub fn _mm256_mullo_epi16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpmulld))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_mullo_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mullo_epi32(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_mul(a.as_i32x8(), b.as_i32x8())) } } @@ -2188,7 +2276,8 @@ pub fn _mm256_mulhrs_epi16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vorps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_or_si256(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_or_si256(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_or(a.as_i32x8(), b.as_i32x8())) } } @@ -2262,7 +2351,8 @@ pub fn _mm256_permutevar8x32_epi32(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpermpd, IMM8 = 9))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_permute4x64_epi64(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_permute4x64_epi64(a: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); unsafe { let zero = i64x4::ZERO; @@ -2288,7 +2378,8 @@ pub fn _mm256_permute4x64_epi64(a: __m256i) -> __m256i { 
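// Why `u32::MAX.into()` became `u32::MAX as u64` in `_mm256_mul_epu32` above:
// trait methods such as `Into::into` cannot be called from a `const fn`
// (absent const trait impls), while primitive `as` casts can. A minimal,
// illustrative repro of the working form (not stdarch code):
const fn low_32_mask() -> u64 {
    u32::MAX as u64 // `u32::MAX.into()` would be rejected in a const fn
}
const _: () = assert!(low_32_mask() == 0xFFFF_FFFF);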
#[cfg_attr(test, assert_instr(vperm2f128, IMM8 = 9))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_permute2x128_si256(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_permute2x128_si256(a: __m256i, b: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_permute2f128_si256::(a, b) } @@ -2302,7 +2393,8 @@ pub fn _mm256_permute2x128_si256(a: __m256i, b: __m256i) -> __m #[cfg_attr(test, assert_instr(vpermpd, IMM8 = 1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_permute4x64_pd(a: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_permute4x64_pd(a: __m256d) -> __m256d { static_assert_uimm_bits!(IMM8, 8); unsafe { simd_shuffle!( @@ -2417,7 +2509,8 @@ pub fn _mm256_shuffle_epi8(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vshufps, MASK = 9))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_shuffle_epi32(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shuffle_epi32(a: __m256i) -> __m256i { static_assert_uimm_bits!(MASK, 8); unsafe { let r: i32x8 = simd_shuffle!( @@ -2448,7 +2541,8 @@ pub fn _mm256_shuffle_epi32(a: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 9))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_shufflehi_epi16(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shufflehi_epi16(a: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); unsafe { let a = a.as_i16x16(); @@ -2488,7 +2582,8 @@ pub fn _mm256_shufflehi_epi16(a: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 9))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_shufflelo_epi16(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shufflelo_epi16(a: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); unsafe { let a = a.as_i16x16(); @@ -2602,7 +2697,8 @@ pub fn _mm256_sll_epi64(a: __m256i, count: __m128i) -> __m256i { #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 7))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_slli_epi16(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_slli_epi16(a: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); unsafe { if IMM8 >= 16 { @@ -2622,7 +2718,8 @@ pub fn _mm256_slli_epi16(a: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpslld, IMM8 = 7))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_slli_epi32(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_slli_epi32(a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 32 { @@ -2642,7 +2739,8 @@ pub fn _mm256_slli_epi32(a: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 7))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_slli_epi64(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue 
= "149298")] +pub const fn _mm256_slli_epi64(a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 64 { @@ -2661,7 +2759,8 @@ pub fn _mm256_slli_epi64(a: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpslldq, IMM8 = 3))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_slli_si256(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_slli_si256(a: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_bslli_epi128::(a) } @@ -2674,7 +2773,8 @@ pub fn _mm256_slli_si256(a: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpslldq, IMM8 = 3))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_bslli_epi128(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_bslli_epi128(a: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); const fn mask(shift: i32, i: u32) -> u32 { let shift = shift as u32 & 0xff; @@ -2737,7 +2837,8 @@ pub fn _mm256_bslli_epi128(a: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpsllvd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_sllv_epi32(a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_sllv_epi32(a: __m128i, count: __m128i) -> __m128i { unsafe { let count = count.as_u32x4(); let no_overflow: u32x4 = simd_lt(count, u32x4::splat(u32::BITS)); @@ -2755,7 +2856,8 @@ pub fn _mm_sllv_epi32(a: __m128i, count: __m128i) -> __m128i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpsllvd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_sllv_epi32(a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_sllv_epi32(a: __m256i, count: __m256i) -> __m256i { unsafe { let count = count.as_u32x8(); let no_overflow: u32x8 = simd_lt(count, u32x8::splat(u32::BITS)); @@ -2773,7 +2875,8 @@ pub fn _mm256_sllv_epi32(a: __m256i, count: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpsllvq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_sllv_epi64(a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_sllv_epi64(a: __m128i, count: __m128i) -> __m128i { unsafe { let count = count.as_u64x2(); let no_overflow: u64x2 = simd_lt(count, u64x2::splat(u64::BITS as u64)); @@ -2791,7 +2894,8 @@ pub fn _mm_sllv_epi64(a: __m128i, count: __m128i) -> __m128i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpsllvq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_sllv_epi64(a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_sllv_epi64(a: __m256i, count: __m256i) -> __m256i { unsafe { let count = count.as_u64x4(); let no_overflow: u64x4 = simd_lt(count, u64x4::splat(u64::BITS as u64)); @@ -2833,7 +2937,8 @@ pub fn _mm256_sra_epi32(a: __m256i, count: __m128i) -> __m256i { #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 7))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_srai_epi16(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub 
const fn _mm256_srai_epi16(a: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); unsafe { transmute(simd_shr(a.as_i16x16(), i16x16::splat(IMM8.min(15) as i16))) } } @@ -2847,7 +2952,8 @@ pub fn _mm256_srai_epi16(a: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 7))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_srai_epi32(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_srai_epi32(a: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); unsafe { transmute(simd_shr(a.as_i32x8(), i32x8::splat(IMM8.min(31)))) } } @@ -2860,7 +2966,8 @@ pub fn _mm256_srai_epi32(a: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpsravd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_srav_epi32(a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_srav_epi32(a: __m128i, count: __m128i) -> __m128i { unsafe { let count = count.as_u32x4(); let no_overflow: u32x4 = simd_lt(count, u32x4::splat(u32::BITS)); @@ -2877,7 +2984,8 @@ pub fn _mm_srav_epi32(a: __m128i, count: __m128i) -> __m128i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpsravd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_srav_epi32(a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_srav_epi32(a: __m256i, count: __m256i) -> __m256i { unsafe { let count = count.as_u32x8(); let no_overflow: u32x8 = simd_lt(count, u32x8::splat(u32::BITS)); @@ -2894,7 +3002,8 @@ pub fn _mm256_srav_epi32(a: __m256i, count: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpsrldq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_srli_si256(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_srli_si256(a: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_bsrli_epi128::(a) } @@ -2907,7 +3016,8 @@ pub fn _mm256_srli_si256(a: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpsrldq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_bsrli_epi128(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_bsrli_epi128(a: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); const fn mask(shift: i32, i: u32) -> u32 { let shift = shift as u32 & 0xff; @@ -3006,7 +3116,8 @@ pub fn _mm256_srl_epi64(a: __m256i, count: __m128i) -> __m256i { #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 7))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_srli_epi16(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_srli_epi16(a: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); unsafe { if IMM8 >= 16 { @@ -3026,7 +3137,8 @@ pub fn _mm256_srli_epi16(a: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 7))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_srli_epi32(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_srli_epi32(a: __m256i) -> __m256i { 
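// As with the other immediate shifts in this file, a shift count of 32 or more
// produces an all-zero vector (the `IMM8 >= 32` branch below); smaller counts
// fall through to an ordinary lane-wise logical shift right.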
static_assert_uimm_bits!(IMM8, 8); unsafe { if IMM8 >= 32 { @@ -3046,7 +3158,8 @@ pub fn _mm256_srli_epi32(a: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 7))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_srli_epi64(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_srli_epi64(a: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); unsafe { if IMM8 >= 64 { @@ -3065,7 +3178,8 @@ pub fn _mm256_srli_epi64(a: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpsrlvd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_srlv_epi32(a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_srlv_epi32(a: __m128i, count: __m128i) -> __m128i { unsafe { let count = count.as_u32x4(); let no_overflow: u32x4 = simd_lt(count, u32x4::splat(u32::BITS)); @@ -3082,7 +3196,8 @@ pub fn _mm_srlv_epi32(a: __m128i, count: __m128i) -> __m128i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpsrlvd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_srlv_epi32(a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_srlv_epi32(a: __m256i, count: __m256i) -> __m256i { unsafe { let count = count.as_u32x8(); let no_overflow: u32x8 = simd_lt(count, u32x8::splat(u32::BITS)); @@ -3099,7 +3214,8 @@ pub fn _mm256_srlv_epi32(a: __m256i, count: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpsrlvq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_srlv_epi64(a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_srlv_epi64(a: __m128i, count: __m128i) -> __m128i { unsafe { let count = count.as_u64x2(); let no_overflow: u64x2 = simd_lt(count, u64x2::splat(u64::BITS as u64)); @@ -3116,7 +3232,8 @@ pub fn _mm_srlv_epi64(a: __m128i, count: __m128i) -> __m128i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpsrlvq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_srlv_epi64(a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_srlv_epi64(a: __m256i, count: __m256i) -> __m256i { unsafe { let count = count.as_u64x4(); let no_overflow: u64x4 = simd_lt(count, u64x4::splat(u64::BITS as u64)); @@ -3152,7 +3269,8 @@ pub unsafe fn _mm256_stream_load_si256(mem_addr: *const __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpsubw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_sub_epi16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_sub_epi16(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_sub(a.as_i16x16(), b.as_i16x16())) } } @@ -3163,7 +3281,8 @@ pub fn _mm256_sub_epi16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpsubd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_sub_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_sub_epi32(a: __m256i, b: __m256i) -> __m256i { unsafe { 
transmute(simd_sub(a.as_i32x8(), b.as_i32x8())) } } @@ -3174,7 +3293,8 @@ pub fn _mm256_sub_epi32(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpsubq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_sub_epi64(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_sub_epi64(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_sub(a.as_i64x4(), b.as_i64x4())) } } @@ -3185,7 +3305,8 @@ pub fn _mm256_sub_epi64(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpsubb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_sub_epi8(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_sub_epi8(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_sub(a.as_i8x32(), b.as_i8x32())) } } @@ -3197,7 +3318,8 @@ pub fn _mm256_sub_epi8(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpsubsw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_subs_epi16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_subs_epi16(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_saturating_sub(a.as_i16x16(), b.as_i16x16())) } } @@ -3209,7 +3331,8 @@ pub fn _mm256_subs_epi16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpsubsb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_subs_epi8(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_subs_epi8(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_saturating_sub(a.as_i8x32(), b.as_i8x32())) } } @@ -3221,7 +3344,8 @@ pub fn _mm256_subs_epi8(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpsubusw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_subs_epu16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_subs_epu16(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_saturating_sub(a.as_u16x16(), b.as_u16x16())) } } @@ -3233,7 +3357,8 @@ pub fn _mm256_subs_epu16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpsubusb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_subs_epu8(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_subs_epu8(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_saturating_sub(a.as_u8x32(), b.as_u8x32())) } } @@ -3280,7 +3405,8 @@ pub fn _mm256_subs_epu8(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpunpckhbw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_unpackhi_epi8(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_unpackhi_epi8(a: __m256i, b: __m256i) -> __m256i { unsafe { #[rustfmt::skip] let r: i8x32 = simd_shuffle!(a.as_i8x32(), b.as_i8x32(), [ @@ -3335,7 +3461,8 @@ pub fn _mm256_unpackhi_epi8(a: __m256i, b: __m256i) -> __m256i { 
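// The unpack intrinsics in this stretch interleave elements of `a` and `b`
// within each 128-bit half: the "lo" variants take the lower elements, the
// "hi" variants the upper ones. A scalar sketch for one half of
// `_mm256_unpacklo_epi64` (illustrative only, not stdarch code):
const fn unpacklo_epi64_half(a: [i64; 2], b: [i64; 2]) -> [i64; 2] {
    [a[0], b[0]] // the "hi" variant would yield [a[1], b[1]]
}
const _: () = {
    let r = unpacklo_epi64_half([1, 2], [10, 20]);
    assert!(r[0] == 1 && r[1] == 10);
};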
#[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpunpcklbw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_unpacklo_epi8(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_unpacklo_epi8(a: __m256i, b: __m256i) -> __m256i { unsafe { #[rustfmt::skip] let r: i8x32 = simd_shuffle!(a.as_i8x32(), b.as_i8x32(), [ @@ -3386,7 +3513,8 @@ pub fn _mm256_unpacklo_epi8(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpunpckhwd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_unpackhi_epi16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_unpackhi_epi16(a: __m256i, b: __m256i) -> __m256i { unsafe { let r: i16x16 = simd_shuffle!( a.as_i16x16(), @@ -3436,7 +3564,8 @@ pub fn _mm256_unpackhi_epi16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vpunpcklwd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_unpacklo_epi16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_unpacklo_epi16(a: __m256i, b: __m256i) -> __m256i { unsafe { let r: i16x16 = simd_shuffle!( a.as_i16x16(), @@ -3479,7 +3608,8 @@ pub fn _mm256_unpacklo_epi16(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vunpckhps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_unpackhi_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_unpackhi_epi32(a: __m256i, b: __m256i) -> __m256i { unsafe { let r: i32x8 = simd_shuffle!(a.as_i32x8(), b.as_i32x8(), [2, 10, 3, 11, 6, 14, 7, 15]); transmute(r) @@ -3518,7 +3648,8 @@ pub fn _mm256_unpackhi_epi32(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vunpcklps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_unpacklo_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_unpacklo_epi32(a: __m256i, b: __m256i) -> __m256i { unsafe { let r: i32x8 = simd_shuffle!(a.as_i32x8(), b.as_i32x8(), [0, 8, 1, 9, 4, 12, 5, 13]); transmute(r) @@ -3557,7 +3688,8 @@ pub fn _mm256_unpacklo_epi32(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vunpckhpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_unpackhi_epi64(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_unpackhi_epi64(a: __m256i, b: __m256i) -> __m256i { unsafe { let r: i64x4 = simd_shuffle!(a.as_i64x4(), b.as_i64x4(), [1, 5, 3, 7]); transmute(r) @@ -3596,7 +3728,8 @@ pub fn _mm256_unpackhi_epi64(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vunpcklpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_unpacklo_epi64(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_unpacklo_epi64(a: __m256i, b: __m256i) -> __m256i { unsafe { let r: i64x4 = simd_shuffle!(a.as_i64x4(), b.as_i64x4(), [0, 4, 2, 6]); transmute(r) @@ -3611,7 +3744,8 @@ pub fn 
_mm256_unpacklo_epi64(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx2")] #[cfg_attr(test, assert_instr(vxorps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_xor_si256(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_xor_si256(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_xor(a.as_i64x4(), b.as_i64x4())) } } @@ -3626,7 +3760,8 @@ pub fn _mm256_xor_si256(a: __m256i, b: __m256i) -> __m256i { // This intrinsic has no corresponding instruction. #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_extract_epi8(a: __m256i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_extract_epi8(a: __m256i) -> i32 { static_assert_uimm_bits!(INDEX, 5); unsafe { simd_extract!(a.as_u8x32(), INDEX as u32, u8) as i32 } } @@ -3642,7 +3777,8 @@ pub fn _mm256_extract_epi8(a: __m256i) -> i32 { // This intrinsic has no corresponding instruction. #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_extract_epi16(a: __m256i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_extract_epi16(a: __m256i) -> i32 { static_assert_uimm_bits!(INDEX, 4); unsafe { simd_extract!(a.as_u16x16(), INDEX as u32, u16) as i32 } } @@ -3771,13 +3907,14 @@ unsafe extern "C" { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; use crate::core_arch::x86::*; #[simd_test(enable = "avx2")] - unsafe fn test_mm256_abs_epi32() { + const unsafe fn test_mm256_abs_epi32() { #[rustfmt::skip] let a = _mm256_setr_epi32( 0, 1, -1, i32::MAX, @@ -3793,7 +3930,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_abs_epi16() { + const unsafe fn test_mm256_abs_epi16() { #[rustfmt::skip] let a = _mm256_setr_epi16( 0, 1, -1, 2, -2, 3, -3, 4, @@ -3809,7 +3946,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_abs_epi8() { + const unsafe fn test_mm256_abs_epi8() { #[rustfmt::skip] let a = _mm256_setr_epi8( 0, 1, -1, 2, -2, 3, -3, 4, @@ -3829,7 +3966,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_add_epi64() { + const unsafe fn test_mm256_add_epi64() { let a = _mm256_setr_epi64x(-10, 0, 100, 1_000_000_000); let b = _mm256_setr_epi64x(-1, 0, 1, 2); let r = _mm256_add_epi64(a, b); @@ -3838,7 +3975,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_add_epi32() { + const unsafe fn test_mm256_add_epi32() { let a = _mm256_setr_epi32(-1, 0, 1, 2, 3, 4, 5, 6); let b = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm256_add_epi32(a, b); @@ -3847,7 +3984,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_add_epi16() { + const unsafe fn test_mm256_add_epi16() { #[rustfmt::skip] let a = _mm256_setr_epi16( 0, 1, 2, 3, 4, 5, 6, 7, @@ -3868,7 +4005,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_add_epi8() { + const unsafe fn test_mm256_add_epi8() { #[rustfmt::skip] let a = _mm256_setr_epi8( 0, 1, 2, 3, 4, 5, 6, 7, @@ -3895,7 +4032,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_adds_epi8() { + const unsafe fn test_mm256_adds_epi8() { #[rustfmt::skip] let a = _mm256_setr_epi8( 0, 1, 2, 3, 4, 5, 6, 7, @@ -3938,7 +4075,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_adds_epi16() { + const unsafe fn 
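// Illustrative sketch, not part of this patch: `_mm256_extract_epi8::<INDEX>`
// reads the vector as 32 bytes and zero-extends the selected byte to i32
// (note the extraction as `u8` above), so a hypothetical scalar model is:
const fn extract_epi8_scalar<const INDEX: usize>(bytes: [u8; 32]) -> i32 {
    bytes[INDEX] as i32
}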
test_mm256_adds_epi16() { #[rustfmt::skip] let a = _mm256_setr_epi16( 0, 1, 2, 3, 4, 5, 6, 7, @@ -3976,7 +4113,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_adds_epu8() { + const unsafe fn test_mm256_adds_epu8() { #[rustfmt::skip] let a = _mm256_setr_epi8( 0, 1, 2, 3, 4, 5, 6, 7, @@ -4011,7 +4148,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_adds_epu16() { + const unsafe fn test_mm256_adds_epu16() { #[rustfmt::skip] let a = _mm256_setr_epi16( 0, 1, 2, 3, 4, 5, 6, 7, @@ -4041,7 +4178,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_and_si256() { + const unsafe fn test_mm256_and_si256() { let a = _mm256_set1_epi8(5); let b = _mm256_set1_epi8(3); let got = _mm256_and_si256(a, b); @@ -4049,7 +4186,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_andnot_si256() { + const unsafe fn test_mm256_andnot_si256() { let a = _mm256_set1_epi8(5); let b = _mm256_set1_epi8(3); let got = _mm256_andnot_si256(a, b); @@ -4057,21 +4194,21 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_avg_epu8() { + const unsafe fn test_mm256_avg_epu8() { let (a, b) = (_mm256_set1_epi8(3), _mm256_set1_epi8(9)); let r = _mm256_avg_epu8(a, b); assert_eq_m256i(r, _mm256_set1_epi8(6)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_avg_epu16() { + const unsafe fn test_mm256_avg_epu16() { let (a, b) = (_mm256_set1_epi16(3), _mm256_set1_epi16(9)); let r = _mm256_avg_epu16(a, b); assert_eq_m256i(r, _mm256_set1_epi16(6)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm_blend_epi32() { + const unsafe fn test_mm_blend_epi32() { let (a, b) = (_mm_set1_epi32(3), _mm_set1_epi32(9)); let e = _mm_setr_epi32(9, 3, 3, 3); let r = _mm_blend_epi32::<0x01>(a, b); @@ -4082,7 +4219,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_blend_epi32() { + const unsafe fn test_mm256_blend_epi32() { let (a, b) = (_mm256_set1_epi32(3), _mm256_set1_epi32(9)); let e = _mm256_setr_epi32(9, 3, 3, 3, 3, 3, 3, 3); let r = _mm256_blend_epi32::<0x01>(a, b); @@ -4098,7 +4235,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_blend_epi16() { + const unsafe fn test_mm256_blend_epi16() { let (a, b) = (_mm256_set1_epi16(3), _mm256_set1_epi16(9)); let e = _mm256_setr_epi16(9, 3, 3, 3, 3, 3, 3, 3, 9, 3, 3, 3, 3, 3, 3, 3); let r = _mm256_blend_epi16::<0x01>(a, b); @@ -4109,7 +4246,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_blendv_epi8() { + const unsafe fn test_mm256_blendv_epi8() { let (a, b) = (_mm256_set1_epi8(4), _mm256_set1_epi8(2)); let mask = _mm256_insert_epi8::<2>(_mm256_set1_epi8(0), -1); let e = _mm256_insert_epi8::<2>(_mm256_set1_epi8(4), 2); @@ -4118,63 +4255,63 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm_broadcastb_epi8() { + const unsafe fn test_mm_broadcastb_epi8() { let a = _mm_insert_epi8::<0>(_mm_set1_epi8(0x00), 0x2a); let res = _mm_broadcastb_epi8(a); assert_eq_m128i(res, _mm_set1_epi8(0x2a)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_broadcastb_epi8() { + const unsafe fn test_mm256_broadcastb_epi8() { let a = _mm_insert_epi8::<0>(_mm_set1_epi8(0x00), 0x2a); let res = _mm256_broadcastb_epi8(a); assert_eq_m256i(res, _mm256_set1_epi8(0x2a)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm_broadcastd_epi32() { + const unsafe fn test_mm_broadcastd_epi32() { let a = _mm_setr_epi32(0x2a, 0x8000000, 0, 0); let res = _mm_broadcastd_epi32(a); assert_eq_m128i(res, _mm_set1_epi32(0x2a)); } #[simd_test(enable = "avx2")] - 
unsafe fn test_mm256_broadcastd_epi32() { + const unsafe fn test_mm256_broadcastd_epi32() { let a = _mm_setr_epi32(0x2a, 0x8000000, 0, 0); let res = _mm256_broadcastd_epi32(a); assert_eq_m256i(res, _mm256_set1_epi32(0x2a)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm_broadcastq_epi64() { + const unsafe fn test_mm_broadcastq_epi64() { let a = _mm_setr_epi64x(0x1ffffffff, 0); let res = _mm_broadcastq_epi64(a); assert_eq_m128i(res, _mm_set1_epi64x(0x1ffffffff)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_broadcastq_epi64() { + const unsafe fn test_mm256_broadcastq_epi64() { let a = _mm_setr_epi64x(0x1ffffffff, 0); let res = _mm256_broadcastq_epi64(a); assert_eq_m256i(res, _mm256_set1_epi64x(0x1ffffffff)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm_broadcastsd_pd() { + const unsafe fn test_mm_broadcastsd_pd() { let a = _mm_setr_pd(6.88, 3.44); let res = _mm_broadcastsd_pd(a); assert_eq_m128d(res, _mm_set1_pd(6.88)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_broadcastsd_pd() { + const unsafe fn test_mm256_broadcastsd_pd() { let a = _mm_setr_pd(6.88, 3.44); let res = _mm256_broadcastsd_pd(a); assert_eq_m256d(res, _mm256_set1_pd(6.88f64)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm_broadcastsi128_si256() { + const unsafe fn test_mm_broadcastsi128_si256() { let a = _mm_setr_epi64x(0x0987654321012334, 0x5678909876543210); let res = _mm_broadcastsi128_si256(a); let retval = _mm256_setr_epi64x( @@ -4187,7 +4324,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_broadcastsi128_si256() { + const unsafe fn test_mm256_broadcastsi128_si256() { let a = _mm_setr_epi64x(0x0987654321012334, 0x5678909876543210); let res = _mm256_broadcastsi128_si256(a); let retval = _mm256_setr_epi64x( @@ -4200,35 +4337,35 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm_broadcastss_ps() { + const unsafe fn test_mm_broadcastss_ps() { let a = _mm_setr_ps(6.88, 3.44, 0.0, 0.0); let res = _mm_broadcastss_ps(a); assert_eq_m128(res, _mm_set1_ps(6.88)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_broadcastss_ps() { + const unsafe fn test_mm256_broadcastss_ps() { let a = _mm_setr_ps(6.88, 3.44, 0.0, 0.0); let res = _mm256_broadcastss_ps(a); assert_eq_m256(res, _mm256_set1_ps(6.88)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm_broadcastw_epi16() { + const unsafe fn test_mm_broadcastw_epi16() { let a = _mm_insert_epi16::<0>(_mm_set1_epi16(0x2a), 0x22b); let res = _mm_broadcastw_epi16(a); assert_eq_m128i(res, _mm_set1_epi16(0x22b)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_broadcastw_epi16() { + const unsafe fn test_mm256_broadcastw_epi16() { let a = _mm_insert_epi16::<0>(_mm_set1_epi16(0x2a), 0x22b); let res = _mm256_broadcastw_epi16(a); assert_eq_m256i(res, _mm256_set1_epi16(0x22b)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_cmpeq_epi8() { + const unsafe fn test_mm256_cmpeq_epi8() { #[rustfmt::skip] let a = _mm256_setr_epi8( 0, 1, 2, 3, 4, 5, 6, 7, @@ -4248,7 +4385,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_cmpeq_epi16() { + const unsafe fn test_mm256_cmpeq_epi16() { #[rustfmt::skip] let a = _mm256_setr_epi16( 0, 1, 2, 3, 4, 5, 6, 7, @@ -4264,7 +4401,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_cmpeq_epi32() { + const unsafe fn test_mm256_cmpeq_epi32() { let a = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm256_setr_epi32(7, 6, 2, 4, 3, 2, 1, 0); let r = _mm256_cmpeq_epi32(a, b); @@ -4274,7 +4411,7 @@ mod tests { } #[simd_test(enable = 
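// Illustrative sketch, not part of this patch: every broadcast test above
// checks the same shape of behaviour, namely that lane 0 of the source is
// splatted into every output lane, e.g. for `_mm256_broadcastd_epi32`:
const fn broadcastd_epi32_scalar(a: [i32; 4]) -> [i32; 8] {
    [a[0]; 8]
}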
"avx2")] - unsafe fn test_mm256_cmpeq_epi64() { + const unsafe fn test_mm256_cmpeq_epi64() { let a = _mm256_setr_epi64x(0, 1, 2, 3); let b = _mm256_setr_epi64x(3, 2, 2, 0); let r = _mm256_cmpeq_epi64(a, b); @@ -4282,7 +4419,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_cmpgt_epi8() { + const unsafe fn test_mm256_cmpgt_epi8() { let a = _mm256_insert_epi8::<0>(_mm256_set1_epi8(0), 5); let b = _mm256_set1_epi8(0); let r = _mm256_cmpgt_epi8(a, b); @@ -4290,7 +4427,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_cmpgt_epi16() { + const unsafe fn test_mm256_cmpgt_epi16() { let a = _mm256_insert_epi16::<0>(_mm256_set1_epi16(0), 5); let b = _mm256_set1_epi16(0); let r = _mm256_cmpgt_epi16(a, b); @@ -4298,7 +4435,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_cmpgt_epi32() { + const unsafe fn test_mm256_cmpgt_epi32() { let a = _mm256_insert_epi32::<0>(_mm256_set1_epi32(0), 5); let b = _mm256_set1_epi32(0); let r = _mm256_cmpgt_epi32(a, b); @@ -4306,7 +4443,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_cmpgt_epi64() { + const unsafe fn test_mm256_cmpgt_epi64() { let a = _mm256_insert_epi64::<0>(_mm256_set1_epi64x(0), 5); let b = _mm256_set1_epi64x(0); let r = _mm256_cmpgt_epi64(a, b); @@ -4314,7 +4451,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_cvtepi8_epi16() { + const unsafe fn test_mm256_cvtepi8_epi16() { #[rustfmt::skip] let a = _mm_setr_epi8( 0, 0, -1, 1, -2, 2, -3, 3, @@ -4329,7 +4466,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_cvtepi8_epi32() { + const unsafe fn test_mm256_cvtepi8_epi32() { #[rustfmt::skip] let a = _mm_setr_epi8( 0, 0, -1, 1, -2, 2, -3, 3, @@ -4340,7 +4477,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_cvtepi8_epi64() { + const unsafe fn test_mm256_cvtepi8_epi64() { #[rustfmt::skip] let a = _mm_setr_epi8( 0, 0, -1, 1, -2, 2, -3, 3, @@ -4351,49 +4488,49 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_cvtepi16_epi32() { + const unsafe fn test_mm256_cvtepi16_epi32() { let a = _mm_setr_epi16(0, 0, -1, 1, -2, 2, -3, 3); let r = _mm256_setr_epi32(0, 0, -1, 1, -2, 2, -3, 3); assert_eq_m256i(r, _mm256_cvtepi16_epi32(a)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_cvtepi16_epi64() { + const unsafe fn test_mm256_cvtepi16_epi64() { let a = _mm_setr_epi16(0, 0, -1, 1, -2, 2, -3, 3); let r = _mm256_setr_epi64x(0, 0, -1, 1); assert_eq_m256i(r, _mm256_cvtepi16_epi64(a)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_cvtepi32_epi64() { + const unsafe fn test_mm256_cvtepi32_epi64() { let a = _mm_setr_epi32(0, 0, -1, 1); let r = _mm256_setr_epi64x(0, 0, -1, 1); assert_eq_m256i(r, _mm256_cvtepi32_epi64(a)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_cvtepu16_epi32() { + const unsafe fn test_mm256_cvtepu16_epi32() { let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7); let r = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7); assert_eq_m256i(r, _mm256_cvtepu16_epi32(a)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_cvtepu16_epi64() { + const unsafe fn test_mm256_cvtepu16_epi64() { let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7); let r = _mm256_setr_epi64x(0, 1, 2, 3); assert_eq_m256i(r, _mm256_cvtepu16_epi64(a)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_cvtepu32_epi64() { + const unsafe fn test_mm256_cvtepu32_epi64() { let a = _mm_setr_epi32(0, 1, 2, 3); let r = _mm256_setr_epi64x(0, 1, 2, 3); assert_eq_m256i(r, _mm256_cvtepu32_epi64(a)); } 
#[simd_test(enable = "avx2")] - unsafe fn test_mm256_cvtepu8_epi16() { + const unsafe fn test_mm256_cvtepu8_epi16() { #[rustfmt::skip] let a = _mm_setr_epi8( 0, 1, 2, 3, 4, 5, 6, 7, @@ -4408,7 +4545,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_cvtepu8_epi32() { + const unsafe fn test_mm256_cvtepu8_epi32() { #[rustfmt::skip] let a = _mm_setr_epi8( 0, 1, 2, 3, 4, 5, 6, 7, @@ -4419,7 +4556,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_cvtepu8_epi64() { + const unsafe fn test_mm256_cvtepu8_epi64() { #[rustfmt::skip] let a = _mm_setr_epi8( 0, 1, 2, 3, 4, 5, 6, 7, @@ -4430,7 +4567,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_extracti128_si256() { + const unsafe fn test_mm256_extracti128_si256() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let r = _mm256_extracti128_si256::<1>(a); let e = _mm_setr_epi64x(3, 4); @@ -4438,7 +4575,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_hadd_epi16() { + const unsafe fn test_mm256_hadd_epi16() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(4); let r = _mm256_hadd_epi16(a, b); @@ -4447,7 +4584,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_hadd_epi32() { + const unsafe fn test_mm256_hadd_epi32() { let a = _mm256_set1_epi32(2); let b = _mm256_set1_epi32(4); let r = _mm256_hadd_epi32(a, b); @@ -4471,7 +4608,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_hsub_epi16() { + const unsafe fn test_mm256_hsub_epi16() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(4); let r = _mm256_hsub_epi16(a, b); @@ -4480,7 +4617,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_hsub_epi32() { + const unsafe fn test_mm256_hsub_epi32() { let a = _mm256_set1_epi32(2); let b = _mm256_set1_epi32(4); let r = _mm256_hsub_epi32(a, b); @@ -4500,7 +4637,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_madd_epi16() { + const unsafe fn test_mm256_madd_epi16() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(4); let r = _mm256_madd_epi16(a, b); @@ -4509,7 +4646,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_inserti128_si256() { + const unsafe fn test_mm256_inserti128_si256() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let b = _mm_setr_epi64x(7, 8); let r = _mm256_inserti128_si256::<1>(a, b); @@ -4527,7 +4664,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm_maskload_epi32() { + const unsafe fn test_mm_maskload_epi32() { let nums = [1, 2, 3, 4]; let a = &nums as *const i32; let mask = _mm_setr_epi32(-1, 0, 0, -1); @@ -4537,7 +4674,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_maskload_epi32() { + const unsafe fn test_mm256_maskload_epi32() { let nums = [1, 2, 3, 4, 5, 6, 7, 8]; let a = &nums as *const i32; let mask = _mm256_setr_epi32(-1, 0, 0, -1, 0, -1, -1, 0); @@ -4547,7 +4684,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm_maskload_epi64() { + const unsafe fn test_mm_maskload_epi64() { let nums = [1_i64, 2_i64]; let a = &nums as *const i64; let mask = _mm_setr_epi64x(0, -1); @@ -4557,7 +4694,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_maskload_epi64() { + const unsafe fn test_mm256_maskload_epi64() { let nums = [1_i64, 2_i64, 3_i64, 4_i64]; let a = &nums as *const i64; let mask = _mm256_setr_epi64x(0, -1, -1, 0); @@ -4567,7 +4704,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm_maskstore_epi32() { + const unsafe fn 
test_mm_maskstore_epi32() { let a = _mm_setr_epi32(1, 2, 3, 4); let mut arr = [-1, -1, -1, -1]; let mask = _mm_setr_epi32(-1, 0, 0, -1); @@ -4577,7 +4714,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_maskstore_epi32() { + const unsafe fn test_mm256_maskstore_epi32() { let a = _mm256_setr_epi32(1, 0x6d726f, 3, 42, 0x777161, 6, 7, 8); let mut arr = [-1, -1, -1, 0x776173, -1, 0x68657265, -1, -1]; let mask = _mm256_setr_epi32(-1, 0, 0, -1, 0, -1, -1, 0); @@ -4587,7 +4724,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm_maskstore_epi64() { + const unsafe fn test_mm_maskstore_epi64() { let a = _mm_setr_epi64x(1_i64, 2_i64); let mut arr = [-1_i64, -1_i64]; let mask = _mm_setr_epi64x(0, -1); @@ -4597,7 +4734,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_maskstore_epi64() { + const unsafe fn test_mm256_maskstore_epi64() { let a = _mm256_setr_epi64x(1_i64, 2_i64, 3_i64, 4_i64); let mut arr = [-1_i64, -1_i64, -1_i64, -1_i64]; let mask = _mm256_setr_epi64x(0, -1, -1, 0); @@ -4607,7 +4744,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_max_epi16() { + const unsafe fn test_mm256_max_epi16() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(4); let r = _mm256_max_epi16(a, b); @@ -4615,7 +4752,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_max_epi32() { + const unsafe fn test_mm256_max_epi32() { let a = _mm256_set1_epi32(2); let b = _mm256_set1_epi32(4); let r = _mm256_max_epi32(a, b); @@ -4623,7 +4760,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_max_epi8() { + const unsafe fn test_mm256_max_epi8() { let a = _mm256_set1_epi8(2); let b = _mm256_set1_epi8(4); let r = _mm256_max_epi8(a, b); @@ -4631,7 +4768,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_max_epu16() { + const unsafe fn test_mm256_max_epu16() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(4); let r = _mm256_max_epu16(a, b); @@ -4639,7 +4776,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_max_epu32() { + const unsafe fn test_mm256_max_epu32() { let a = _mm256_set1_epi32(2); let b = _mm256_set1_epi32(4); let r = _mm256_max_epu32(a, b); @@ -4647,7 +4784,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_max_epu8() { + const unsafe fn test_mm256_max_epu8() { let a = _mm256_set1_epi8(2); let b = _mm256_set1_epi8(4); let r = _mm256_max_epu8(a, b); @@ -4655,7 +4792,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_min_epi16() { + const unsafe fn test_mm256_min_epi16() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(4); let r = _mm256_min_epi16(a, b); @@ -4663,7 +4800,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_min_epi32() { + const unsafe fn test_mm256_min_epi32() { let a = _mm256_set1_epi32(2); let b = _mm256_set1_epi32(4); let r = _mm256_min_epi32(a, b); @@ -4671,7 +4808,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_min_epi8() { + const unsafe fn test_mm256_min_epi8() { let a = _mm256_set1_epi8(2); let b = _mm256_set1_epi8(4); let r = _mm256_min_epi8(a, b); @@ -4679,7 +4816,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_min_epu16() { + const unsafe fn test_mm256_min_epu16() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(4); let r = _mm256_min_epu16(a, b); @@ -4687,7 +4824,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_min_epu32() { + const unsafe fn test_mm256_min_epu32() 
{ let a = _mm256_set1_epi32(2); let b = _mm256_set1_epi32(4); let r = _mm256_min_epu32(a, b); @@ -4695,7 +4832,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_min_epu8() { + const unsafe fn test_mm256_min_epu8() { let a = _mm256_set1_epi8(2); let b = _mm256_set1_epi8(4); let r = _mm256_min_epu8(a, b); @@ -4703,7 +4840,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_movemask_epi8() { + const unsafe fn test_mm256_movemask_epi8() { let a = _mm256_set1_epi8(-1); let r = _mm256_movemask_epi8(a); let e = -1; @@ -4720,7 +4857,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_mul_epi32() { + const unsafe fn test_mm256_mul_epi32() { let a = _mm256_setr_epi32(0, 0, 0, 0, 2, 2, 2, 2); let b = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm256_mul_epi32(a, b); @@ -4729,7 +4866,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_mul_epu32() { + const unsafe fn test_mm256_mul_epu32() { let a = _mm256_setr_epi32(0, 0, 0, 0, 2, 2, 2, 2); let b = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm256_mul_epu32(a, b); @@ -4738,7 +4875,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_mulhi_epi16() { + const unsafe fn test_mm256_mulhi_epi16() { let a = _mm256_set1_epi16(6535); let b = _mm256_set1_epi16(6535); let r = _mm256_mulhi_epi16(a, b); @@ -4747,7 +4884,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_mulhi_epu16() { + const unsafe fn test_mm256_mulhi_epu16() { let a = _mm256_set1_epi16(6535); let b = _mm256_set1_epi16(6535); let r = _mm256_mulhi_epu16(a, b); @@ -4756,7 +4893,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_mullo_epi16() { + const unsafe fn test_mm256_mullo_epi16() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(4); let r = _mm256_mullo_epi16(a, b); @@ -4765,7 +4902,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_mullo_epi32() { + const unsafe fn test_mm256_mullo_epi32() { let a = _mm256_set1_epi32(2); let b = _mm256_set1_epi32(4); let r = _mm256_mullo_epi32(a, b); @@ -4783,7 +4920,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_or_si256() { + const unsafe fn test_mm256_or_si256() { let a = _mm256_set1_epi8(-1); let b = _mm256_set1_epi8(0); let r = _mm256_or_si256(a, b); @@ -4852,7 +4989,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_shufflehi_epi16() { + const unsafe fn test_mm256_shufflehi_epi16() { #[rustfmt::skip] let a = _mm256_setr_epi16( 0, 1, 2, 3, 11, 22, 33, 44, @@ -4868,7 +5005,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_shufflelo_epi16() { + const unsafe fn test_mm256_shufflelo_epi16() { #[rustfmt::skip] let a = _mm256_setr_epi16( 11, 22, 33, 44, 0, 1, 2, 3, @@ -4935,7 +5072,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_slli_epi16() { + const unsafe fn test_mm256_slli_epi16() { assert_eq_m256i( _mm256_slli_epi16::<4>(_mm256_set1_epi16(0xFF)), _mm256_set1_epi16(0xFF0), @@ -4943,7 +5080,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_slli_epi32() { + const unsafe fn test_mm256_slli_epi32() { assert_eq_m256i( _mm256_slli_epi32::<4>(_mm256_set1_epi32(0xFFFF)), _mm256_set1_epi32(0xFFFF0), @@ -4951,7 +5088,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_slli_epi64() { + const unsafe fn test_mm256_slli_epi64() { assert_eq_m256i( _mm256_slli_epi64::<4>(_mm256_set1_epi64x(0xFFFFFFFF)), _mm256_set1_epi64x(0xFFFFFFFF0), @@ -4959,14 
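// Illustrative sketch, not part of this patch: `_mm256_mulhi_epi16` keeps the
// high 16 bits of the signed widened product, so the 6535 * 6535 case above
// (42_706_225) yields 651 per lane. A hypothetical single-lane model:
const fn mulhi_epi16_lane(a: i16, b: i16) -> i16 {
    ((a as i32 * b as i32) >> 16) as i16
}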
+5096,14 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_slli_si256() { + const unsafe fn test_mm256_slli_si256() { let a = _mm256_set1_epi64x(0xFFFFFFFF); let r = _mm256_slli_si256::<3>(a); assert_eq_m256i(r, _mm256_set1_epi64x(0xFFFFFFFF000000)); } #[simd_test(enable = "avx2")] - unsafe fn test_mm_sllv_epi32() { + const unsafe fn test_mm_sllv_epi32() { let a = _mm_set1_epi32(2); let b = _mm_set1_epi32(1); let r = _mm_sllv_epi32(a, b); @@ -4975,7 +5112,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_sllv_epi32() { + const unsafe fn test_mm256_sllv_epi32() { let a = _mm256_set1_epi32(2); let b = _mm256_set1_epi32(1); let r = _mm256_sllv_epi32(a, b); @@ -4984,7 +5121,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm_sllv_epi64() { + const unsafe fn test_mm_sllv_epi64() { let a = _mm_set1_epi64x(2); let b = _mm_set1_epi64x(1); let r = _mm_sllv_epi64(a, b); @@ -4993,7 +5130,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_sllv_epi64() { + const unsafe fn test_mm256_sllv_epi64() { let a = _mm256_set1_epi64x(2); let b = _mm256_set1_epi64x(1); let r = _mm256_sllv_epi64(a, b); @@ -5018,7 +5155,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_srai_epi16() { + const unsafe fn test_mm256_srai_epi16() { assert_eq_m256i( _mm256_srai_epi16::<1>(_mm256_set1_epi16(-1)), _mm256_set1_epi16(-1), @@ -5026,7 +5163,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_srai_epi32() { + const unsafe fn test_mm256_srai_epi32() { assert_eq_m256i( _mm256_srai_epi32::<1>(_mm256_set1_epi32(-1)), _mm256_set1_epi32(-1), @@ -5034,7 +5171,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm_srav_epi32() { + const unsafe fn test_mm_srav_epi32() { let a = _mm_set1_epi32(4); let count = _mm_set1_epi32(1); let r = _mm_srav_epi32(a, count); @@ -5043,7 +5180,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_srav_epi32() { + const unsafe fn test_mm256_srav_epi32() { let a = _mm256_set1_epi32(4); let count = _mm256_set1_epi32(1); let r = _mm256_srav_epi32(a, count); @@ -5052,7 +5189,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_srli_si256() { + const unsafe fn test_mm256_srli_si256() { #[rustfmt::skip] let a = _mm256_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, @@ -5096,7 +5233,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_srli_epi16() { + const unsafe fn test_mm256_srli_epi16() { assert_eq_m256i( _mm256_srli_epi16::<4>(_mm256_set1_epi16(0xFF)), _mm256_set1_epi16(0xF), @@ -5104,7 +5241,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_srli_epi32() { + const unsafe fn test_mm256_srli_epi32() { assert_eq_m256i( _mm256_srli_epi32::<4>(_mm256_set1_epi32(0xFFFF)), _mm256_set1_epi32(0xFFF), @@ -5112,7 +5249,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_srli_epi64() { + const unsafe fn test_mm256_srli_epi64() { assert_eq_m256i( _mm256_srli_epi64::<4>(_mm256_set1_epi64x(0xFFFFFFFF)), _mm256_set1_epi64x(0xFFFFFFF), @@ -5120,7 +5257,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm_srlv_epi32() { + const unsafe fn test_mm_srlv_epi32() { let a = _mm_set1_epi32(2); let count = _mm_set1_epi32(1); let r = _mm_srlv_epi32(a, count); @@ -5129,7 +5266,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_srlv_epi32() { + const unsafe fn test_mm256_srlv_epi32() { let a = _mm256_set1_epi32(2); let count = _mm256_set1_epi32(1); let r = _mm256_srlv_epi32(a, 
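// Illustrative sketch, not part of this patch: the srai tests above depend on
// arithmetic shifts replicating the sign bit (so -1 >> 1 stays -1), while the
// srli tests use logical shifts that pull in zeros. Hypothetical lane models:
const fn srai_epi32_lane(a: i32, count: u32) -> i32 {
    a >> count // arithmetic shift: Rust's `>>` on a signed integer
}
const fn srli_epi32_lane(a: i32, count: u32) -> i32 {
    ((a as u32) >> count) as i32 // logical shift via the unsigned type
}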
count); @@ -5138,7 +5275,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm_srlv_epi64() { + const unsafe fn test_mm_srlv_epi64() { let a = _mm_set1_epi64x(2); let count = _mm_set1_epi64x(1); let r = _mm_srlv_epi64(a, count); @@ -5147,7 +5284,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_srlv_epi64() { + const unsafe fn test_mm256_srlv_epi64() { let a = _mm256_set1_epi64x(2); let count = _mm256_set1_epi64x(1); let r = _mm256_srlv_epi64(a, count); @@ -5163,7 +5300,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_sub_epi16() { + const unsafe fn test_mm256_sub_epi16() { let a = _mm256_set1_epi16(4); let b = _mm256_set1_epi16(2); let r = _mm256_sub_epi16(a, b); @@ -5171,7 +5308,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_sub_epi32() { + const unsafe fn test_mm256_sub_epi32() { let a = _mm256_set1_epi32(4); let b = _mm256_set1_epi32(2); let r = _mm256_sub_epi32(a, b); @@ -5179,7 +5316,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_sub_epi64() { + const unsafe fn test_mm256_sub_epi64() { let a = _mm256_set1_epi64x(4); let b = _mm256_set1_epi64x(2); let r = _mm256_sub_epi64(a, b); @@ -5187,7 +5324,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_sub_epi8() { + const unsafe fn test_mm256_sub_epi8() { let a = _mm256_set1_epi8(4); let b = _mm256_set1_epi8(2); let r = _mm256_sub_epi8(a, b); @@ -5195,7 +5332,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_subs_epi16() { + const unsafe fn test_mm256_subs_epi16() { let a = _mm256_set1_epi16(4); let b = _mm256_set1_epi16(2); let r = _mm256_subs_epi16(a, b); @@ -5203,7 +5340,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_subs_epi8() { + const unsafe fn test_mm256_subs_epi8() { let a = _mm256_set1_epi8(4); let b = _mm256_set1_epi8(2); let r = _mm256_subs_epi8(a, b); @@ -5211,7 +5348,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_subs_epu16() { + const unsafe fn test_mm256_subs_epu16() { let a = _mm256_set1_epi16(4); let b = _mm256_set1_epi16(2); let r = _mm256_subs_epu16(a, b); @@ -5219,7 +5356,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_subs_epu8() { + const unsafe fn test_mm256_subs_epu8() { let a = _mm256_set1_epi8(4); let b = _mm256_set1_epi8(2); let r = _mm256_subs_epu8(a, b); @@ -5227,7 +5364,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_xor_si256() { + const unsafe fn test_mm256_xor_si256() { let a = _mm256_set1_epi8(5); let b = _mm256_set1_epi8(3); let r = _mm256_xor_si256(a, b); @@ -5235,7 +5372,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_alignr_epi8() { + const unsafe fn test_mm256_alignr_epi8() { #[rustfmt::skip] let a = _mm256_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, @@ -5327,7 +5464,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_permute4x64_epi64() { + const unsafe fn test_mm256_permute4x64_epi64() { let a = _mm256_setr_epi64x(100, 200, 300, 400); let expected = _mm256_setr_epi64x(400, 100, 200, 100); let r = _mm256_permute4x64_epi64::<0b00010011>(a); @@ -5335,7 +5472,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_permute2x128_si256() { + const unsafe fn test_mm256_permute2x128_si256() { let a = _mm256_setr_epi64x(100, 200, 500, 600); let b = _mm256_setr_epi64x(300, 400, 700, 800); let r = _mm256_permute2x128_si256::<0b00_01_00_11>(a, b); @@ -5344,7 +5481,7 @@ mod tests { } #[simd_test(enable = "avx2")] - 
unsafe fn test_mm256_permute4x64_pd() { + const unsafe fn test_mm256_permute4x64_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let r = _mm256_permute4x64_pd::<0b00_01_00_11>(a); let e = _mm256_setr_pd(4., 1., 2., 1.); @@ -5702,7 +5839,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_extract_epi8() { + const unsafe fn test_mm256_extract_epi8() { #[rustfmt::skip] let a = _mm256_setr_epi8( -1, 1, 2, 3, 4, 5, 6, 7, @@ -5717,7 +5854,7 @@ mod tests { } #[simd_test(enable = "avx2")] - unsafe fn test_mm256_extract_epi16() { + const unsafe fn test_mm256_extract_epi16() { #[rustfmt::skip] let a = _mm256_setr_epi16( -1, 1, 2, 3, 4, 5, 6, 7, diff --git a/crates/core_arch/src/x86/avx512bitalg.rs b/crates/core_arch/src/x86/avx512bitalg.rs index 1cbf0faea0..832a584322 100644 --- a/crates/core_arch/src/x86/avx512bitalg.rs +++ b/crates/core_arch/src/x86/avx512bitalg.rs @@ -43,7 +43,8 @@ unsafe extern "C" { #[target_feature(enable = "avx512bitalg")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntw))] -pub fn _mm512_popcnt_epi16(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_popcnt_epi16(a: __m512i) -> __m512i { unsafe { transmute(simd_ctpop(a.as_i16x32())) } } @@ -57,7 +58,8 @@ pub fn _mm512_popcnt_epi16(a: __m512i) -> __m512i { #[target_feature(enable = "avx512bitalg")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntw))] -pub fn _mm512_maskz_popcnt_epi16(k: __mmask32, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_popcnt_epi16(k: __mmask32, a: __m512i) -> __m512i { unsafe { transmute(simd_select_bitmask( k, @@ -77,7 +79,8 @@ pub fn _mm512_maskz_popcnt_epi16(k: __mmask32, a: __m512i) -> __m512i { #[target_feature(enable = "avx512bitalg")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntw))] -pub fn _mm512_mask_popcnt_epi16(src: __m512i, k: __mmask32, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_popcnt_epi16(src: __m512i, k: __mmask32, a: __m512i) -> __m512i { unsafe { transmute(simd_select_bitmask( k, @@ -94,7 +97,8 @@ pub fn _mm512_mask_popcnt_epi16(src: __m512i, k: __mmask32, a: __m512i) -> __m51 #[target_feature(enable = "avx512bitalg,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntw))] -pub fn _mm256_popcnt_epi16(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_popcnt_epi16(a: __m256i) -> __m256i { unsafe { transmute(simd_ctpop(a.as_i16x16())) } } @@ -108,7 +112,8 @@ pub fn _mm256_popcnt_epi16(a: __m256i) -> __m256i { #[target_feature(enable = "avx512bitalg,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntw))] -pub fn _mm256_maskz_popcnt_epi16(k: __mmask16, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_popcnt_epi16(k: __mmask16, a: __m256i) -> __m256i { unsafe { transmute(simd_select_bitmask( k, @@ -128,7 +133,8 @@ pub fn _mm256_maskz_popcnt_epi16(k: __mmask16, a: __m256i) -> __m256i { #[target_feature(enable = "avx512bitalg,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntw))] -pub fn 
_mm256_mask_popcnt_epi16(src: __m256i, k: __mmask16, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_popcnt_epi16(src: __m256i, k: __mmask16, a: __m256i) -> __m256i { unsafe { transmute(simd_select_bitmask( k, @@ -145,7 +151,8 @@ pub fn _mm256_mask_popcnt_epi16(src: __m256i, k: __mmask16, a: __m256i) -> __m25 #[target_feature(enable = "avx512bitalg,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntw))] -pub fn _mm_popcnt_epi16(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_popcnt_epi16(a: __m128i) -> __m128i { unsafe { transmute(simd_ctpop(a.as_i16x8())) } } @@ -159,7 +166,8 @@ pub fn _mm_popcnt_epi16(a: __m128i) -> __m128i { #[target_feature(enable = "avx512bitalg,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntw))] -pub fn _mm_maskz_popcnt_epi16(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_popcnt_epi16(k: __mmask8, a: __m128i) -> __m128i { unsafe { transmute(simd_select_bitmask( k, @@ -179,7 +187,8 @@ pub fn _mm_maskz_popcnt_epi16(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512bitalg,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntw))] -pub fn _mm_mask_popcnt_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_popcnt_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { transmute(simd_select_bitmask( k, @@ -196,7 +205,8 @@ pub fn _mm_mask_popcnt_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512bitalg")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntb))] -pub fn _mm512_popcnt_epi8(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_popcnt_epi8(a: __m512i) -> __m512i { unsafe { transmute(simd_ctpop(a.as_i8x64())) } } @@ -210,7 +220,8 @@ pub fn _mm512_popcnt_epi8(a: __m512i) -> __m512i { #[target_feature(enable = "avx512bitalg")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntb))] -pub fn _mm512_maskz_popcnt_epi8(k: __mmask64, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_popcnt_epi8(k: __mmask64, a: __m512i) -> __m512i { unsafe { transmute(simd_select_bitmask( k, @@ -230,7 +241,8 @@ pub fn _mm512_maskz_popcnt_epi8(k: __mmask64, a: __m512i) -> __m512i { #[target_feature(enable = "avx512bitalg")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntb))] -pub fn _mm512_mask_popcnt_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_popcnt_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __m512i { unsafe { transmute(simd_select_bitmask( k, @@ -247,7 +259,8 @@ pub fn _mm512_mask_popcnt_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __m512 #[target_feature(enable = "avx512bitalg,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntb))] -pub fn _mm256_popcnt_epi8(a: __m256i) 
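// Illustrative sketch, not part of this patch: the vpopcntw/vpopcntb family
// converted above is a per-lane population count, with the mask/maskz variants
// then selecting per lane between that count and `src` or zero. A hypothetical
// single-lane model:
const fn popcnt_epi16_lane(a: i16) -> i16 {
    a.count_ones() as i16
}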
-> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_popcnt_epi8(a: __m256i) -> __m256i { unsafe { transmute(simd_ctpop(a.as_i8x32())) } } @@ -261,7 +274,8 @@ pub fn _mm256_popcnt_epi8(a: __m256i) -> __m256i { #[target_feature(enable = "avx512bitalg,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntb))] -pub fn _mm256_maskz_popcnt_epi8(k: __mmask32, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_popcnt_epi8(k: __mmask32, a: __m256i) -> __m256i { unsafe { transmute(simd_select_bitmask( k, @@ -281,7 +295,8 @@ pub fn _mm256_maskz_popcnt_epi8(k: __mmask32, a: __m256i) -> __m256i { #[target_feature(enable = "avx512bitalg,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntb))] -pub fn _mm256_mask_popcnt_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_popcnt_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __m256i { unsafe { transmute(simd_select_bitmask( k, @@ -298,7 +313,8 @@ pub fn _mm256_mask_popcnt_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __m256 #[target_feature(enable = "avx512bitalg,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntb))] -pub fn _mm_popcnt_epi8(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_popcnt_epi8(a: __m128i) -> __m128i { unsafe { transmute(simd_ctpop(a.as_i8x16())) } } @@ -312,7 +328,8 @@ pub fn _mm_popcnt_epi8(a: __m128i) -> __m128i { #[target_feature(enable = "avx512bitalg,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntb))] -pub fn _mm_maskz_popcnt_epi8(k: __mmask16, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_popcnt_epi8(k: __mmask16, a: __m128i) -> __m128i { unsafe { transmute(simd_select_bitmask( k, @@ -332,7 +349,8 @@ pub fn _mm_maskz_popcnt_epi8(k: __mmask16, a: __m128i) -> __m128i { #[target_feature(enable = "avx512bitalg,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntb))] -pub fn _mm_mask_popcnt_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_popcnt_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m128i { unsafe { transmute(simd_select_bitmask( k, @@ -436,12 +454,13 @@ mod tests { // __mXXXi happens to be defined in terms of signed integers. 
#![allow(overflowing_literals)] + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; use crate::core_arch::x86::*; #[simd_test(enable = "avx512bitalg,avx512f")] - unsafe fn test_mm512_popcnt_epi16() { + const unsafe fn test_mm512_popcnt_epi16() { let test_data = _mm512_set_epi16( 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, 0x1_FF, 0x3_FF, 0x7_FF, 0xF_FF, 0x1F_FF, 0x3F_FF, 0x7F_FF, 0xFF_FF, -1, -100, 255, 256, 2, 4, 8, 16, 32, 64, 128, 256, 512, @@ -456,7 +475,7 @@ mod tests { } #[simd_test(enable = "avx512bitalg,avx512f")] - unsafe fn test_mm512_maskz_popcnt_epi16() { + const unsafe fn test_mm512_maskz_popcnt_epi16() { let test_data = _mm512_set_epi16( 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, 0x1_FF, 0x3_FF, 0x7_FF, 0xF_FF, 0x1F_FF, 0x3F_FF, 0x7F_FF, 0xFF_FF, -1, -100, 255, 256, 2, 4, 8, 16, 32, 64, 128, 256, 512, @@ -472,7 +491,7 @@ mod tests { } #[simd_test(enable = "avx512bitalg,avx512f")] - unsafe fn test_mm512_mask_popcnt_epi16() { + const unsafe fn test_mm512_mask_popcnt_epi16() { let test_data = _mm512_set_epi16( 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, 0x1_FF, 0x3_FF, 0x7_FF, 0xF_FF, 0x1F_FF, 0x3F_FF, 0x7F_FF, 0xFF_FF, -1, -100, 255, 256, 2, 4, 8, 16, 32, 64, 128, 256, 512, @@ -488,7 +507,7 @@ mod tests { } #[simd_test(enable = "avx512bitalg,avx512f,avx512vl")] - unsafe fn test_mm256_popcnt_epi16() { + const unsafe fn test_mm256_popcnt_epi16() { let test_data = _mm256_set_epi16( 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, 0x1_FF, 0x3_FF, 0x7_FF, 0xF_FF, 0x1F_FF, 0x3F_FF, 0x7F_FF, @@ -500,7 +519,7 @@ mod tests { } #[simd_test(enable = "avx512bitalg,avx512f,avx512vl")] - unsafe fn test_mm256_maskz_popcnt_epi16() { + const unsafe fn test_mm256_maskz_popcnt_epi16() { let test_data = _mm256_set_epi16( 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, 0x1_FF, 0x3_FF, 0x7_FF, 0xF_FF, 0x1F_FF, 0x3F_FF, 0x7F_FF, @@ -512,7 +531,7 @@ mod tests { } #[simd_test(enable = "avx512bitalg,avx512f,avx512vl")] - unsafe fn test_mm256_mask_popcnt_epi16() { + const unsafe fn test_mm256_mask_popcnt_epi16() { let test_data = _mm256_set_epi16( 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, 0x1_FF, 0x3_FF, 0x7_FF, 0xF_FF, 0x1F_FF, 0x3F_FF, 0x7F_FF, @@ -526,7 +545,7 @@ mod tests { } #[simd_test(enable = "avx512bitalg,avx512f,avx512vl")] - unsafe fn test_mm_popcnt_epi16() { + const unsafe fn test_mm_popcnt_epi16() { let test_data = _mm_set_epi16(0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F); let actual_result = _mm_popcnt_epi16(test_data); let reference_result = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); @@ -534,7 +553,7 @@ mod tests { } #[simd_test(enable = "avx512bitalg,avx512f,avx512vl")] - unsafe fn test_mm_maskz_popcnt_epi16() { + const unsafe fn test_mm_maskz_popcnt_epi16() { let test_data = _mm_set_epi16(0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F); let mask = 0xF0; let actual_result = _mm_maskz_popcnt_epi16(mask, test_data); @@ -543,7 +562,7 @@ mod tests { } #[simd_test(enable = "avx512bitalg,avx512f,avx512vl")] - unsafe fn test_mm_mask_popcnt_epi16() { + const unsafe fn test_mm_mask_popcnt_epi16() { let test_data = _mm_set_epi16(0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F); let mask = 0xF0; let actual_result = _mm_mask_popcnt_epi16(test_data, mask, test_data); @@ -552,7 +571,7 @@ mod tests { } #[simd_test(enable = "avx512bitalg,avx512f")] - unsafe fn test_mm512_popcnt_epi8() { + const unsafe fn test_mm512_popcnt_epi8() { let test_data = _mm512_set_epi8( 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, -1, 2, 4, 8, 16, 32, 64, 128, 171, 206, 100, 217, 109, 253, 190, 177, 254, 179, 215, 230, 68, 201, 172, 183, 154, 84, 56, 227, 189, @@ 
-569,7 +588,7 @@ mod tests { } #[simd_test(enable = "avx512bitalg,avx512f")] - unsafe fn test_mm512_maskz_popcnt_epi8() { + const unsafe fn test_mm512_maskz_popcnt_epi8() { let test_data = _mm512_set_epi8( 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, -1, 2, 4, 8, 16, 32, 64, 128, 171, 206, 100, 217, 109, 253, 190, 177, 254, 179, 215, 230, 68, 201, 172, 183, 154, 84, 56, 227, 189, @@ -587,7 +606,7 @@ mod tests { } #[simd_test(enable = "avx512bitalg,avx512f")] - unsafe fn test_mm512_mask_popcnt_epi8() { + const unsafe fn test_mm512_mask_popcnt_epi8() { let test_data = _mm512_set_epi8( 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, -1, 2, 4, 8, 16, 32, 64, 128, 171, 206, 100, 217, 109, 253, 190, 177, 254, 179, 215, 230, 68, 201, 172, 183, 154, 84, 56, 227, 189, @@ -605,7 +624,7 @@ mod tests { } #[simd_test(enable = "avx512bitalg,avx512f,avx512vl")] - unsafe fn test_mm256_popcnt_epi8() { + const unsafe fn test_mm256_popcnt_epi8() { let test_data = _mm256_set_epi8( 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, -1, 2, 4, 8, 16, 32, 64, 128, 171, 206, 100, 217, 109, 253, 190, 177, 254, 179, 215, 230, 68, 201, 172, @@ -619,7 +638,7 @@ mod tests { } #[simd_test(enable = "avx512bitalg,avx512f,avx512vl")] - unsafe fn test_mm256_maskz_popcnt_epi8() { + const unsafe fn test_mm256_maskz_popcnt_epi8() { let test_data = _mm256_set_epi8( 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, -1, 2, 4, 8, 16, 32, 64, 251, 73, 121, 143, 145, 85, 91, 137, 90, 225, 21, 249, 211, 155, 228, 70, @@ -634,7 +653,7 @@ mod tests { } #[simd_test(enable = "avx512bitalg,avx512f,avx512vl")] - unsafe fn test_mm256_mask_popcnt_epi8() { + const unsafe fn test_mm256_mask_popcnt_epi8() { let test_data = _mm256_set_epi8( 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, -1, 2, 4, 8, 16, 32, 64, 251, 73, 121, 143, 145, 85, 91, 137, 90, 225, 21, 249, 211, 155, 228, 70, @@ -649,7 +668,7 @@ mod tests { } #[simd_test(enable = "avx512bitalg,avx512f,avx512vl")] - unsafe fn test_mm_popcnt_epi8() { + const unsafe fn test_mm_popcnt_epi8() { let test_data = _mm_set_epi8( 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, -1, 2, 4, 8, 16, 32, 64, ); @@ -659,7 +678,7 @@ mod tests { } #[simd_test(enable = "avx512bitalg,avx512f,avx512vl")] - unsafe fn test_mm_maskz_popcnt_epi8() { + const unsafe fn test_mm_maskz_popcnt_epi8() { let test_data = _mm_set_epi8( 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 90, 225, 21, 249, 211, 155, 228, 70, ); @@ -670,7 +689,7 @@ mod tests { } #[simd_test(enable = "avx512bitalg,avx512f,avx512vl")] - unsafe fn test_mm_mask_popcnt_epi8() { + const unsafe fn test_mm_mask_popcnt_epi8() { let test_data = _mm_set_epi8( 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 90, 225, 21, 249, 211, 155, 228, 70, ); diff --git a/crates/core_arch/src/x86/avx512bw.rs b/crates/core_arch/src/x86/avx512bw.rs index 5d17d42532..b6e941908a 100644 --- a/crates/core_arch/src/x86/avx512bw.rs +++ b/crates/core_arch/src/x86/avx512bw.rs @@ -14,7 +14,8 @@ use stdarch_test::assert_instr; #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsw))] -pub fn _mm512_abs_epi16(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_abs_epi16(a: __m512i) -> __m512i { unsafe { let a = a.as_i16x32(); let cmp: i16x32 = simd_gt(a, i16x32::ZERO); @@ -29,7 +30,8 @@ pub fn _mm512_abs_epi16(a: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsw))] -pub fn _mm512_mask_abs_epi16(src: 
__m512i, k: __mmask32, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_abs_epi16(src: __m512i, k: __mmask32, a: __m512i) -> __m512i { unsafe { let abs = _mm512_abs_epi16(a).as_i16x32(); transmute(simd_select_bitmask(k, abs, src.as_i16x32())) @@ -43,7 +45,8 @@ pub fn _mm512_mask_abs_epi16(src: __m512i, k: __mmask32, a: __m512i) -> __m512i #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsw))] -pub fn _mm512_maskz_abs_epi16(k: __mmask32, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_abs_epi16(k: __mmask32, a: __m512i) -> __m512i { unsafe { let abs = _mm512_abs_epi16(a).as_i16x32(); transmute(simd_select_bitmask(k, abs, i16x32::ZERO)) @@ -57,7 +60,8 @@ pub fn _mm512_maskz_abs_epi16(k: __mmask32, a: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsw))] -pub fn _mm256_mask_abs_epi16(src: __m256i, k: __mmask16, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_abs_epi16(src: __m256i, k: __mmask16, a: __m256i) -> __m256i { unsafe { let abs = _mm256_abs_epi16(a).as_i16x16(); transmute(simd_select_bitmask(k, abs, src.as_i16x16())) @@ -71,7 +75,8 @@ pub fn _mm256_mask_abs_epi16(src: __m256i, k: __mmask16, a: __m256i) -> __m256i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsw))] -pub fn _mm256_maskz_abs_epi16(k: __mmask16, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_abs_epi16(k: __mmask16, a: __m256i) -> __m256i { unsafe { let abs = _mm256_abs_epi16(a).as_i16x16(); transmute(simd_select_bitmask(k, abs, i16x16::ZERO)) @@ -85,7 +90,8 @@ pub fn _mm256_maskz_abs_epi16(k: __mmask16, a: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsw))] -pub fn _mm_mask_abs_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_abs_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let abs = _mm_abs_epi16(a).as_i16x8(); transmute(simd_select_bitmask(k, abs, src.as_i16x8())) @@ -99,7 +105,8 @@ pub fn _mm_mask_abs_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsw))] -pub fn _mm_maskz_abs_epi16(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_abs_epi16(k: __mmask8, a: __m128i) -> __m128i { unsafe { let abs = _mm_abs_epi16(a).as_i16x8(); transmute(simd_select_bitmask(k, abs, i16x8::ZERO)) @@ -113,7 +120,8 @@ pub fn _mm_maskz_abs_epi16(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsb))] -pub fn _mm512_abs_epi8(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_abs_epi8(a: __m512i) -> 
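// Illustrative sketch, not part of this patch: the masked abs variants above
// all compute the plain absolute value and then pick, per lane, between it and
// the merge source according to the mask bits. A hypothetical scalar model for
// the 8-lane `_mm_mask_abs_epi16` shape (one mask bit per lane):
const fn mask_abs_epi16_scalar(src: [i16; 8], k: u8, a: [i16; 8]) -> [i16; 8] {
    let mut out = src;
    let mut i = 0;
    while i < 8 {
        if (k >> i) & 1 == 1 {
            out[i] = a[i].wrapping_abs(); // i16::MIN stays i16::MIN, as vpabsw does
        }
        i += 1;
    }
    out
}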
__m512i { unsafe { let a = a.as_i8x64(); let cmp: i8x64 = simd_gt(a, i8x64::ZERO); @@ -128,7 +136,8 @@ pub fn _mm512_abs_epi8(a: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsb))] -pub fn _mm512_mask_abs_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_abs_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __m512i { unsafe { let abs = _mm512_abs_epi8(a).as_i8x64(); transmute(simd_select_bitmask(k, abs, src.as_i8x64())) @@ -142,7 +151,8 @@ pub fn _mm512_mask_abs_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsb))] -pub fn _mm512_maskz_abs_epi8(k: __mmask64, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_abs_epi8(k: __mmask64, a: __m512i) -> __m512i { unsafe { let abs = _mm512_abs_epi8(a).as_i8x64(); transmute(simd_select_bitmask(k, abs, i8x64::ZERO)) @@ -156,7 +166,8 @@ pub fn _mm512_maskz_abs_epi8(k: __mmask64, a: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsb))] -pub fn _mm256_mask_abs_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_abs_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __m256i { unsafe { let abs = _mm256_abs_epi8(a).as_i8x32(); transmute(simd_select_bitmask(k, abs, src.as_i8x32())) @@ -170,7 +181,8 @@ pub fn _mm256_mask_abs_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsb))] -pub fn _mm256_maskz_abs_epi8(k: __mmask32, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_abs_epi8(k: __mmask32, a: __m256i) -> __m256i { unsafe { let abs = _mm256_abs_epi8(a).as_i8x32(); transmute(simd_select_bitmask(k, abs, i8x32::ZERO)) @@ -184,7 +196,8 @@ pub fn _mm256_maskz_abs_epi8(k: __mmask32, a: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsb))] -pub fn _mm_mask_abs_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_abs_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m128i { unsafe { let abs = _mm_abs_epi8(a).as_i8x16(); transmute(simd_select_bitmask(k, abs, src.as_i8x16())) @@ -198,7 +211,8 @@ pub fn _mm_mask_abs_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m128i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsb))] -pub fn _mm_maskz_abs_epi8(k: __mmask16, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_abs_epi8(k: __mmask16, a: __m128i) -> __m128i { unsafe { let abs = _mm_abs_epi8(a).as_i8x16(); transmute(simd_select_bitmask(k, abs, i8x16::ZERO)) @@ -212,7 +226,8 @@ pub fn _mm_maskz_abs_epi8(k: __mmask16, a: __m128i) -> 
__m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddw))] -pub fn _mm512_add_epi16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_add_epi16(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_add(a.as_i16x32(), b.as_i16x32())) } } @@ -223,7 +238,8 @@ pub fn _mm512_add_epi16(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddw))] -pub fn _mm512_mask_add_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_add_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let add = _mm512_add_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, add, src.as_i16x32())) @@ -237,7 +253,8 @@ pub fn _mm512_mask_add_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddw))] -pub fn _mm512_maskz_add_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_add_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let add = _mm512_add_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, add, i16x32::ZERO)) @@ -251,7 +268,8 @@ pub fn _mm512_maskz_add_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddw))] -pub fn _mm256_mask_add_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_add_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let add = _mm256_add_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, add, src.as_i16x16())) @@ -265,7 +283,8 @@ pub fn _mm256_mask_add_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddw))] -pub fn _mm256_maskz_add_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_add_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let add = _mm256_add_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, add, i16x16::ZERO)) @@ -279,7 +298,8 @@ pub fn _mm256_maskz_add_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddw))] -pub fn _mm_mask_add_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_add_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let add = _mm_add_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, add, src.as_i16x8())) @@ -293,7 +313,8 @@ pub fn _mm_mask_add_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> 
#[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddw))] -pub fn _mm_maskz_add_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_add_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let add = _mm_add_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, add, i16x8::ZERO)) @@ -307,7 +328,8 @@ pub fn _mm_maskz_add_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddb))] -pub fn _mm512_add_epi8(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_add_epi8(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_add(a.as_i8x64(), b.as_i8x64())) } } @@ -318,7 +340,8 @@ pub fn _mm512_add_epi8(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddb))] -pub fn _mm512_mask_add_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_add_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let add = _mm512_add_epi8(a, b).as_i8x64(); transmute(simd_select_bitmask(k, add, src.as_i8x64())) @@ -332,7 +355,8 @@ pub fn _mm512_mask_add_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddb))] -pub fn _mm512_maskz_add_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_add_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let add = _mm512_add_epi8(a, b).as_i8x64(); transmute(simd_select_bitmask(k, add, i8x64::ZERO)) @@ -346,7 +370,8 @@ pub fn _mm512_maskz_add_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddb))] -pub fn _mm256_mask_add_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_add_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let add = _mm256_add_epi8(a, b).as_i8x32(); transmute(simd_select_bitmask(k, add, src.as_i8x32())) @@ -360,7 +385,8 @@ pub fn _mm256_mask_add_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddb))] -pub fn _mm256_maskz_add_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_add_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let add = _mm256_add_epi8(a, b).as_i8x32(); transmute(simd_select_bitmask(k, add, i8x32::ZERO)) @@ -374,7 +400,8 @@ pub fn _mm256_maskz_add_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = 
"stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddb))] -pub fn _mm_mask_add_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_add_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let add = _mm_add_epi8(a, b).as_i8x16(); transmute(simd_select_bitmask(k, add, src.as_i8x16())) @@ -388,7 +415,8 @@ pub fn _mm_mask_add_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddb))] -pub fn _mm_maskz_add_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_add_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let add = _mm_add_epi8(a, b).as_i8x16(); transmute(simd_select_bitmask(k, add, i8x16::ZERO)) @@ -402,7 +430,8 @@ pub fn _mm_maskz_add_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddusw))] -pub fn _mm512_adds_epu16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_adds_epu16(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_saturating_add(a.as_u16x32(), b.as_u16x32())) } } @@ -413,7 +442,8 @@ pub fn _mm512_adds_epu16(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddusw))] -pub fn _mm512_mask_adds_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_adds_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let add = _mm512_adds_epu16(a, b).as_u16x32(); transmute(simd_select_bitmask(k, add, src.as_u16x32())) @@ -427,7 +457,8 @@ pub fn _mm512_mask_adds_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddusw))] -pub fn _mm512_maskz_adds_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_adds_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let add = _mm512_adds_epu16(a, b).as_u16x32(); transmute(simd_select_bitmask(k, add, u16x32::ZERO)) @@ -441,7 +472,8 @@ pub fn _mm512_maskz_adds_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddusw))] -pub fn _mm256_mask_adds_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_adds_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let add = _mm256_adds_epu16(a, b).as_u16x16(); transmute(simd_select_bitmask(k, add, src.as_u16x16())) @@ -455,7 +487,8 @@ pub fn _mm256_mask_adds_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = 
"stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddusw))] -pub fn _mm256_maskz_adds_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_adds_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let add = _mm256_adds_epu16(a, b).as_u16x16(); transmute(simd_select_bitmask(k, add, u16x16::ZERO)) @@ -469,7 +502,8 @@ pub fn _mm256_maskz_adds_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddusw))] -pub fn _mm_mask_adds_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_adds_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let add = _mm_adds_epu16(a, b).as_u16x8(); transmute(simd_select_bitmask(k, add, src.as_u16x8())) @@ -483,7 +517,8 @@ pub fn _mm_mask_adds_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddusw))] -pub fn _mm_maskz_adds_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_adds_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let add = _mm_adds_epu16(a, b).as_u16x8(); transmute(simd_select_bitmask(k, add, u16x8::ZERO)) @@ -497,7 +532,8 @@ pub fn _mm_maskz_adds_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddusb))] -pub fn _mm512_adds_epu8(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_adds_epu8(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_saturating_add(a.as_u8x64(), b.as_u8x64())) } } @@ -508,7 +544,8 @@ pub fn _mm512_adds_epu8(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddusb))] -pub fn _mm512_mask_adds_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_adds_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let add = _mm512_adds_epu8(a, b).as_u8x64(); transmute(simd_select_bitmask(k, add, src.as_u8x64())) @@ -522,7 +559,8 @@ pub fn _mm512_mask_adds_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddusb))] -pub fn _mm512_maskz_adds_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_adds_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let add = _mm512_adds_epu8(a, b).as_u8x64(); transmute(simd_select_bitmask(k, add, u8x64::ZERO)) @@ -536,7 +574,8 @@ pub fn _mm512_maskz_adds_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] 
#[cfg_attr(test, assert_instr(vpaddusb))] -pub fn _mm256_mask_adds_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_adds_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let add = _mm256_adds_epu8(a, b).as_u8x32(); transmute(simd_select_bitmask(k, add, src.as_u8x32())) @@ -550,7 +589,8 @@ pub fn _mm256_mask_adds_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddusb))] -pub fn _mm256_maskz_adds_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_adds_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let add = _mm256_adds_epu8(a, b).as_u8x32(); transmute(simd_select_bitmask(k, add, u8x32::ZERO)) @@ -564,7 +604,8 @@ pub fn _mm256_maskz_adds_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddusb))] -pub fn _mm_mask_adds_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_adds_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let add = _mm_adds_epu8(a, b).as_u8x16(); transmute(simd_select_bitmask(k, add, src.as_u8x16())) @@ -578,7 +619,8 @@ pub fn _mm_mask_adds_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddusb))] -pub fn _mm_maskz_adds_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_adds_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let add = _mm_adds_epu8(a, b).as_u8x16(); transmute(simd_select_bitmask(k, add, u8x16::ZERO)) @@ -592,7 +634,8 @@ pub fn _mm_maskz_adds_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddsw))] -pub fn _mm512_adds_epi16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_adds_epi16(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_saturating_add(a.as_i16x32(), b.as_i16x32())) } } @@ -603,7 +646,8 @@ pub fn _mm512_adds_epi16(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddsw))] -pub fn _mm512_mask_adds_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_adds_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let add = _mm512_adds_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, add, src.as_i16x32())) @@ -617,7 +661,8 @@ pub fn _mm512_mask_adds_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] 
#[cfg_attr(test, assert_instr(vpaddsw))] -pub fn _mm512_maskz_adds_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_adds_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let add = _mm512_adds_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, add, i16x32::ZERO)) @@ -631,7 +676,8 @@ pub fn _mm512_maskz_adds_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddsw))] -pub fn _mm256_mask_adds_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_adds_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let add = _mm256_adds_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, add, src.as_i16x16())) @@ -645,7 +691,8 @@ pub fn _mm256_mask_adds_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddsw))] -pub fn _mm256_maskz_adds_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_adds_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let add = _mm256_adds_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, add, i16x16::ZERO)) @@ -659,7 +706,8 @@ pub fn _mm256_maskz_adds_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddsw))] -pub fn _mm_mask_adds_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_adds_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let add = _mm_adds_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, add, src.as_i16x8())) @@ -673,7 +721,8 @@ pub fn _mm_mask_adds_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddsw))] -pub fn _mm_maskz_adds_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_adds_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let add = _mm_adds_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, add, i16x8::ZERO)) @@ -687,7 +736,8 @@ pub fn _mm_maskz_adds_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddsb))] -pub fn _mm512_adds_epi8(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_adds_epi8(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_saturating_add(a.as_i8x64(), b.as_i8x64())) } } @@ -698,7 +748,8 @@ pub fn _mm512_adds_epi8(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, 
assert_instr(vpaddsb))] -pub fn _mm512_mask_adds_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_adds_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let add = _mm512_adds_epi8(a, b).as_i8x64(); transmute(simd_select_bitmask(k, add, src.as_i8x64())) @@ -712,7 +763,8 @@ pub fn _mm512_mask_adds_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddsb))] -pub fn _mm512_maskz_adds_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_adds_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let add = _mm512_adds_epi8(a, b).as_i8x64(); transmute(simd_select_bitmask(k, add, i8x64::ZERO)) @@ -726,7 +778,8 @@ pub fn _mm512_maskz_adds_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddsb))] -pub fn _mm256_mask_adds_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_adds_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let add = _mm256_adds_epi8(a, b).as_i8x32(); transmute(simd_select_bitmask(k, add, src.as_i8x32())) @@ -740,7 +793,8 @@ pub fn _mm256_mask_adds_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddsb))] -pub fn _mm256_maskz_adds_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_adds_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let add = _mm256_adds_epi8(a, b).as_i8x32(); transmute(simd_select_bitmask(k, add, i8x32::ZERO)) @@ -754,7 +808,8 @@ pub fn _mm256_maskz_adds_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddsb))] -pub fn _mm_mask_adds_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_adds_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let add = _mm_adds_epi8(a, b).as_i8x16(); transmute(simd_select_bitmask(k, add, src.as_i8x16())) @@ -768,7 +823,8 @@ pub fn _mm_mask_adds_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddsb))] -pub fn _mm_maskz_adds_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_adds_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let add = _mm_adds_epi8(a, b).as_i8x16(); transmute(simd_select_bitmask(k, add, i8x16::ZERO)) @@ -782,7 +838,8 @@ pub fn _mm_maskz_adds_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = 
"avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubw))] -pub fn _mm512_sub_epi16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_sub_epi16(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_sub(a.as_i16x32(), b.as_i16x32())) } } @@ -793,7 +850,8 @@ pub fn _mm512_sub_epi16(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubw))] -pub fn _mm512_mask_sub_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_sub_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let sub = _mm512_sub_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, sub, src.as_i16x32())) @@ -807,7 +865,8 @@ pub fn _mm512_mask_sub_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubw))] -pub fn _mm512_maskz_sub_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_sub_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let sub = _mm512_sub_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, sub, i16x32::ZERO)) @@ -821,7 +880,8 @@ pub fn _mm512_maskz_sub_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubw))] -pub fn _mm256_mask_sub_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_sub_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let sub = _mm256_sub_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, sub, src.as_i16x16())) @@ -835,7 +895,8 @@ pub fn _mm256_mask_sub_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubw))] -pub fn _mm256_maskz_sub_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_sub_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let sub = _mm256_sub_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, sub, i16x16::ZERO)) @@ -849,7 +910,8 @@ pub fn _mm256_maskz_sub_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubw))] -pub fn _mm_mask_sub_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_sub_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let sub = _mm_sub_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, sub, src.as_i16x8())) @@ -863,7 +925,8 @@ pub fn _mm_mask_sub_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = 
"avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubw))] -pub fn _mm_maskz_sub_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_sub_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let sub = _mm_sub_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, sub, i16x8::ZERO)) @@ -877,7 +940,8 @@ pub fn _mm_maskz_sub_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubb))] -pub fn _mm512_sub_epi8(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_sub_epi8(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_sub(a.as_i8x64(), b.as_i8x64())) } } @@ -888,7 +952,8 @@ pub fn _mm512_sub_epi8(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubb))] -pub fn _mm512_mask_sub_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_sub_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let sub = _mm512_sub_epi8(a, b).as_i8x64(); transmute(simd_select_bitmask(k, sub, src.as_i8x64())) @@ -902,7 +967,8 @@ pub fn _mm512_mask_sub_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubb))] -pub fn _mm512_maskz_sub_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_sub_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let sub = _mm512_sub_epi8(a, b).as_i8x64(); transmute(simd_select_bitmask(k, sub, i8x64::ZERO)) @@ -916,7 +982,8 @@ pub fn _mm512_maskz_sub_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubb))] -pub fn _mm256_mask_sub_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_sub_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let sub = _mm256_sub_epi8(a, b).as_i8x32(); transmute(simd_select_bitmask(k, sub, src.as_i8x32())) @@ -930,7 +997,8 @@ pub fn _mm256_mask_sub_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubb))] -pub fn _mm256_maskz_sub_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_sub_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let sub = _mm256_sub_epi8(a, b).as_i8x32(); transmute(simd_select_bitmask(k, sub, i8x32::ZERO)) @@ -944,7 +1012,8 @@ pub fn _mm256_maskz_sub_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] 
#[cfg_attr(test, assert_instr(vpsubb))] -pub fn _mm_mask_sub_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_sub_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let sub = _mm_sub_epi8(a, b).as_i8x16(); transmute(simd_select_bitmask(k, sub, src.as_i8x16())) @@ -958,7 +1027,8 @@ pub fn _mm_mask_sub_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubb))] -pub fn _mm_maskz_sub_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_sub_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let sub = _mm_sub_epi8(a, b).as_i8x16(); transmute(simd_select_bitmask(k, sub, i8x16::ZERO)) @@ -972,7 +1042,8 @@ pub fn _mm_maskz_sub_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubusw))] -pub fn _mm512_subs_epu16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_subs_epu16(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_saturating_sub(a.as_u16x32(), b.as_u16x32())) } } @@ -983,7 +1054,8 @@ pub fn _mm512_subs_epu16(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubusw))] -pub fn _mm512_mask_subs_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_subs_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let sub = _mm512_subs_epu16(a, b).as_u16x32(); transmute(simd_select_bitmask(k, sub, src.as_u16x32())) @@ -997,7 +1069,8 @@ pub fn _mm512_mask_subs_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubusw))] -pub fn _mm512_maskz_subs_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_subs_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let sub = _mm512_subs_epu16(a, b).as_u16x32(); transmute(simd_select_bitmask(k, sub, u16x32::ZERO)) @@ -1011,7 +1084,8 @@ pub fn _mm512_maskz_subs_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubusw))] -pub fn _mm256_mask_subs_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_subs_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let sub = _mm256_subs_epu16(a, b).as_u16x16(); transmute(simd_select_bitmask(k, sub, src.as_u16x16())) @@ -1025,7 +1099,8 @@ pub fn _mm256_mask_subs_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] 
#[cfg_attr(test, assert_instr(vpsubusw))] -pub fn _mm256_maskz_subs_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_subs_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let sub = _mm256_subs_epu16(a, b).as_u16x16(); transmute(simd_select_bitmask(k, sub, u16x16::ZERO)) @@ -1039,7 +1114,8 @@ pub fn _mm256_maskz_subs_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubusw))] -pub fn _mm_mask_subs_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_subs_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let sub = _mm_subs_epu16(a, b).as_u16x8(); transmute(simd_select_bitmask(k, sub, src.as_u16x8())) @@ -1053,7 +1129,8 @@ pub fn _mm_mask_subs_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubusw))] -pub fn _mm_maskz_subs_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_subs_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let sub = _mm_subs_epu16(a, b).as_u16x8(); transmute(simd_select_bitmask(k, sub, u16x8::ZERO)) @@ -1067,7 +1144,8 @@ pub fn _mm_maskz_subs_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubusb))] -pub fn _mm512_subs_epu8(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_subs_epu8(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_saturating_sub(a.as_u8x64(), b.as_u8x64())) } } @@ -1078,7 +1156,8 @@ pub fn _mm512_subs_epu8(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubusb))] -pub fn _mm512_mask_subs_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_subs_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let sub = _mm512_subs_epu8(a, b).as_u8x64(); transmute(simd_select_bitmask(k, sub, src.as_u8x64())) @@ -1092,7 +1171,8 @@ pub fn _mm512_mask_subs_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubusb))] -pub fn _mm512_maskz_subs_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_subs_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let sub = _mm512_subs_epu8(a, b).as_u8x64(); transmute(simd_select_bitmask(k, sub, u8x64::ZERO)) @@ -1106,7 +1186,8 @@ pub fn _mm512_maskz_subs_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, 
assert_instr(vpsubusb))] -pub fn _mm256_mask_subs_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_subs_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let sub = _mm256_subs_epu8(a, b).as_u8x32(); transmute(simd_select_bitmask(k, sub, src.as_u8x32())) @@ -1120,7 +1201,8 @@ pub fn _mm256_mask_subs_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubusb))] -pub fn _mm256_maskz_subs_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_subs_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let sub = _mm256_subs_epu8(a, b).as_u8x32(); transmute(simd_select_bitmask(k, sub, u8x32::ZERO)) @@ -1134,7 +1216,8 @@ pub fn _mm256_maskz_subs_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubusb))] -pub fn _mm_mask_subs_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_subs_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let sub = _mm_subs_epu8(a, b).as_u8x16(); transmute(simd_select_bitmask(k, sub, src.as_u8x16())) @@ -1148,7 +1231,8 @@ pub fn _mm_mask_subs_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubusb))] -pub fn _mm_maskz_subs_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_subs_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let sub = _mm_subs_epu8(a, b).as_u8x16(); transmute(simd_select_bitmask(k, sub, u8x16::ZERO)) @@ -1162,7 +1246,8 @@ pub fn _mm_maskz_subs_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubsw))] -pub fn _mm512_subs_epi16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_subs_epi16(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_saturating_sub(a.as_i16x32(), b.as_i16x32())) } } @@ -1173,7 +1258,8 @@ pub fn _mm512_subs_epi16(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubsw))] -pub fn _mm512_mask_subs_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_subs_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let sub = _mm512_subs_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, sub, src.as_i16x32())) @@ -1187,7 +1273,8 @@ pub fn _mm512_mask_subs_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] 
#[cfg_attr(test, assert_instr(vpsubsw))] -pub fn _mm512_maskz_subs_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_subs_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let sub = _mm512_subs_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, sub, i16x32::ZERO)) @@ -1201,7 +1288,8 @@ pub fn _mm512_maskz_subs_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubsw))] -pub fn _mm256_mask_subs_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_subs_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let sub = _mm256_subs_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, sub, src.as_i16x16())) @@ -1215,7 +1303,8 @@ pub fn _mm256_mask_subs_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubsw))] -pub fn _mm256_maskz_subs_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_subs_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let sub = _mm256_subs_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, sub, i16x16::ZERO)) @@ -1229,7 +1318,8 @@ pub fn _mm256_maskz_subs_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubsw))] -pub fn _mm_mask_subs_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_subs_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let sub = _mm_subs_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, sub, src.as_i16x8())) @@ -1243,7 +1333,8 @@ pub fn _mm_mask_subs_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubsw))] -pub fn _mm_maskz_subs_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_subs_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let sub = _mm_subs_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, sub, i16x8::ZERO)) @@ -1257,7 +1348,8 @@ pub fn _mm_maskz_subs_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubsb))] -pub fn _mm512_subs_epi8(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_subs_epi8(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_saturating_sub(a.as_i8x64(), b.as_i8x64())) } } @@ -1268,7 +1360,8 @@ pub fn _mm512_subs_epi8(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] 
#[cfg_attr(test, assert_instr(vpsubsb))] -pub fn _mm512_mask_subs_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_subs_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let sub = _mm512_subs_epi8(a, b).as_i8x64(); transmute(simd_select_bitmask(k, sub, src.as_i8x64())) @@ -1282,7 +1375,8 @@ pub fn _mm512_mask_subs_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubsb))] -pub fn _mm512_maskz_subs_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_subs_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let sub = _mm512_subs_epi8(a, b).as_i8x64(); transmute(simd_select_bitmask(k, sub, i8x64::ZERO)) @@ -1296,7 +1390,8 @@ pub fn _mm512_maskz_subs_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubsb))] -pub fn _mm256_mask_subs_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_subs_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let sub = _mm256_subs_epi8(a, b).as_i8x32(); transmute(simd_select_bitmask(k, sub, src.as_i8x32())) @@ -1310,7 +1405,8 @@ pub fn _mm256_mask_subs_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubsb))] -pub fn _mm256_maskz_subs_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_subs_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let sub = _mm256_subs_epi8(a, b).as_i8x32(); transmute(simd_select_bitmask(k, sub, i8x32::ZERO)) @@ -1324,7 +1420,8 @@ pub fn _mm256_maskz_subs_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubsb))] -pub fn _mm_mask_subs_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_subs_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let sub = _mm_subs_epi8(a, b).as_i8x16(); transmute(simd_select_bitmask(k, sub, src.as_i8x16())) @@ -1338,7 +1435,8 @@ pub fn _mm_mask_subs_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubsb))] -pub fn _mm_maskz_subs_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_subs_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let sub = _mm_subs_epi8(a, b).as_i8x16(); transmute(simd_select_bitmask(k, sub, i8x16::ZERO)) @@ -1352,7 +1450,8 @@ pub fn _mm_maskz_subs_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { 
#[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulhuw))] -pub fn _mm512_mulhi_epu16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mulhi_epu16(a: __m512i, b: __m512i) -> __m512i { unsafe { let a = simd_cast::<_, u32x32>(a.as_u16x32()); let b = simd_cast::<_, u32x32>(b.as_u16x32()); @@ -1368,7 +1467,13 @@ pub fn _mm512_mulhi_epu16(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulhuw))] -pub fn _mm512_mask_mulhi_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_mulhi_epu16( + src: __m512i, + k: __mmask32, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { let mul = _mm512_mulhi_epu16(a, b).as_u16x32(); transmute(simd_select_bitmask(k, mul, src.as_u16x32())) @@ -1382,7 +1487,8 @@ pub fn _mm512_mask_mulhi_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512 #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulhuw))] -pub fn _mm512_maskz_mulhi_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_mulhi_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let mul = _mm512_mulhi_epu16(a, b).as_u16x32(); transmute(simd_select_bitmask(k, mul, u16x32::ZERO)) @@ -1396,7 +1502,13 @@ pub fn _mm512_maskz_mulhi_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulhuw))] -pub fn _mm256_mask_mulhi_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_mulhi_epu16( + src: __m256i, + k: __mmask16, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { let mul = _mm256_mulhi_epu16(a, b).as_u16x16(); transmute(simd_select_bitmask(k, mul, src.as_u16x16())) @@ -1410,7 +1522,8 @@ pub fn _mm256_mask_mulhi_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulhuw))] -pub fn _mm256_maskz_mulhi_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_mulhi_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let mul = _mm256_mulhi_epu16(a, b).as_u16x16(); transmute(simd_select_bitmask(k, mul, u16x16::ZERO)) @@ -1424,7 +1537,8 @@ pub fn _mm256_maskz_mulhi_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulhuw))] -pub fn _mm_mask_mulhi_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mulhi_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let mul = _mm_mulhi_epu16(a, b).as_u16x8(); transmute(simd_select_bitmask(k, mul, 
src.as_u16x8())) @@ -1438,7 +1552,8 @@ pub fn _mm_mask_mulhi_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) - #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulhuw))] -pub fn _mm_maskz_mulhi_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_mulhi_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let mul = _mm_mulhi_epu16(a, b).as_u16x8(); transmute(simd_select_bitmask(k, mul, u16x8::ZERO)) @@ -1452,7 +1567,8 @@ pub fn _mm_maskz_mulhi_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulhw))] -pub fn _mm512_mulhi_epi16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mulhi_epi16(a: __m512i, b: __m512i) -> __m512i { unsafe { let a = simd_cast::<_, i32x32>(a.as_i16x32()); let b = simd_cast::<_, i32x32>(b.as_i16x32()); @@ -1468,7 +1584,13 @@ pub fn _mm512_mulhi_epi16(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulhw))] -pub fn _mm512_mask_mulhi_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_mulhi_epi16( + src: __m512i, + k: __mmask32, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { let mul = _mm512_mulhi_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, mul, src.as_i16x32())) @@ -1482,7 +1604,8 @@ pub fn _mm512_mask_mulhi_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512 #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulhw))] -pub fn _mm512_maskz_mulhi_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_mulhi_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let mul = _mm512_mulhi_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, mul, i16x32::ZERO)) @@ -1496,7 +1619,13 @@ pub fn _mm512_maskz_mulhi_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulhw))] -pub fn _mm256_mask_mulhi_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_mulhi_epi16( + src: __m256i, + k: __mmask16, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { let mul = _mm256_mulhi_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, mul, src.as_i16x16())) @@ -1510,7 +1639,8 @@ pub fn _mm256_mask_mulhi_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulhw))] -pub fn _mm256_maskz_mulhi_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_mulhi_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let mul = 
_mm256_mulhi_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, mul, i16x16::ZERO)) @@ -1524,7 +1654,8 @@ pub fn _mm256_maskz_mulhi_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulhw))] -pub fn _mm_mask_mulhi_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mulhi_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let mul = _mm_mulhi_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, mul, src.as_i16x8())) @@ -1538,7 +1669,8 @@ pub fn _mm_mask_mulhi_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) - #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulhw))] -pub fn _mm_maskz_mulhi_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_mulhi_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let mul = _mm_mulhi_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, mul, i16x8::ZERO)) @@ -1647,7 +1779,8 @@ pub fn _mm_maskz_mulhrs_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmullw))] -pub fn _mm512_mullo_epi16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mullo_epi16(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_mul(a.as_i16x32(), b.as_i16x32())) } } @@ -1658,7 +1791,13 @@ pub fn _mm512_mullo_epi16(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmullw))] -pub fn _mm512_mask_mullo_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_mullo_epi16( + src: __m512i, + k: __mmask32, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { let mul = _mm512_mullo_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, mul, src.as_i16x32())) @@ -1672,7 +1811,8 @@ pub fn _mm512_mask_mullo_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512 #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmullw))] -pub fn _mm512_maskz_mullo_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_mullo_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let mul = _mm512_mullo_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, mul, i16x32::ZERO)) @@ -1686,7 +1826,13 @@ pub fn _mm512_maskz_mullo_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmullw))] -pub fn _mm256_mask_mullo_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_mullo_epi16( + src: __m256i, + k: __mmask16, + a: __m256i, + b: 
__m256i, +) -> __m256i { unsafe { let mul = _mm256_mullo_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, mul, src.as_i16x16())) @@ -1700,7 +1846,8 @@ pub fn _mm256_mask_mullo_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmullw))] -pub fn _mm256_maskz_mullo_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_mullo_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let mul = _mm256_mullo_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, mul, i16x16::ZERO)) @@ -1714,7 +1861,8 @@ pub fn _mm256_maskz_mullo_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmullw))] -pub fn _mm_mask_mullo_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mullo_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let mul = _mm_mullo_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, mul, src.as_i16x8())) @@ -1728,7 +1876,8 @@ pub fn _mm_mask_mullo_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) - #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmullw))] -pub fn _mm_maskz_mullo_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_mullo_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let mul = _mm_mullo_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, mul, i16x8::ZERO)) @@ -1742,7 +1891,8 @@ pub fn _mm_maskz_mullo_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxuw))] -pub fn _mm512_max_epu16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_max_epu16(a: __m512i, b: __m512i) -> __m512i { unsafe { simd_imax(a.as_u16x32(), b.as_u16x32()).as_m512i() } } @@ -1753,7 +1903,8 @@ pub fn _mm512_max_epu16(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxuw))] -pub fn _mm512_mask_max_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_max_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let max = _mm512_max_epu16(a, b).as_u16x32(); transmute(simd_select_bitmask(k, max, src.as_u16x32())) @@ -1767,7 +1918,8 @@ pub fn _mm512_mask_max_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxuw))] -pub fn _mm512_maskz_max_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_max_epu16(k: __mmask32, a: __m512i, b: __m512i) -> 
__m512i { unsafe { let max = _mm512_max_epu16(a, b).as_u16x32(); transmute(simd_select_bitmask(k, max, u16x32::ZERO)) @@ -1781,7 +1933,8 @@ pub fn _mm512_maskz_max_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxuw))] -pub fn _mm256_mask_max_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_max_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let max = _mm256_max_epu16(a, b).as_u16x16(); transmute(simd_select_bitmask(k, max, src.as_u16x16())) @@ -1795,7 +1948,8 @@ pub fn _mm256_mask_max_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxuw))] -pub fn _mm256_maskz_max_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_max_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let max = _mm256_max_epu16(a, b).as_u16x16(); transmute(simd_select_bitmask(k, max, u16x16::ZERO)) @@ -1809,7 +1963,8 @@ pub fn _mm256_maskz_max_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxuw))] -pub fn _mm_mask_max_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_max_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let max = _mm_max_epu16(a, b).as_u16x8(); transmute(simd_select_bitmask(k, max, src.as_u16x8())) @@ -1823,7 +1978,8 @@ pub fn _mm_mask_max_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxuw))] -pub fn _mm_maskz_max_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_max_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let max = _mm_max_epu16(a, b).as_u16x8(); transmute(simd_select_bitmask(k, max, u16x8::ZERO)) @@ -1837,7 +1993,8 @@ pub fn _mm_maskz_max_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxub))] -pub fn _mm512_max_epu8(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_max_epu8(a: __m512i, b: __m512i) -> __m512i { unsafe { simd_imax(a.as_u8x64(), b.as_u8x64()).as_m512i() } } @@ -1848,7 +2005,8 @@ pub fn _mm512_max_epu8(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxub))] -pub fn _mm512_mask_max_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_max_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { 
let max = _mm512_max_epu8(a, b).as_u8x64(); transmute(simd_select_bitmask(k, max, src.as_u8x64())) @@ -1862,7 +2020,8 @@ pub fn _mm512_mask_max_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxub))] -pub fn _mm512_maskz_max_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_max_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let max = _mm512_max_epu8(a, b).as_u8x64(); transmute(simd_select_bitmask(k, max, u8x64::ZERO)) @@ -1876,7 +2035,8 @@ pub fn _mm512_maskz_max_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxub))] -pub fn _mm256_mask_max_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_max_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let max = _mm256_max_epu8(a, b).as_u8x32(); transmute(simd_select_bitmask(k, max, src.as_u8x32())) @@ -1890,7 +2050,8 @@ pub fn _mm256_mask_max_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxub))] -pub fn _mm256_maskz_max_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_max_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let max = _mm256_max_epu8(a, b).as_u8x32(); transmute(simd_select_bitmask(k, max, u8x32::ZERO)) @@ -1904,7 +2065,8 @@ pub fn _mm256_maskz_max_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxub))] -pub fn _mm_mask_max_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_max_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let max = _mm_max_epu8(a, b).as_u8x16(); transmute(simd_select_bitmask(k, max, src.as_u8x16())) @@ -1918,7 +2080,8 @@ pub fn _mm_mask_max_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxub))] -pub fn _mm_maskz_max_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_max_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let max = _mm_max_epu8(a, b).as_u8x16(); transmute(simd_select_bitmask(k, max, u8x16::ZERO)) @@ -1932,7 +2095,8 @@ pub fn _mm_maskz_max_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsw))] -pub fn _mm512_max_epi16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_max_epi16(a: __m512i, b: __m512i) -> __m512i { 
unsafe { simd_imax(a.as_i16x32(), b.as_i16x32()).as_m512i() } } @@ -1943,7 +2107,8 @@ pub fn _mm512_max_epi16(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsw))] -pub fn _mm512_mask_max_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_max_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let max = _mm512_max_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, max, src.as_i16x32())) @@ -1957,7 +2122,8 @@ pub fn _mm512_mask_max_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsw))] -pub fn _mm512_maskz_max_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_max_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let max = _mm512_max_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, max, i16x32::ZERO)) @@ -1971,7 +2137,8 @@ pub fn _mm512_maskz_max_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsw))] -pub fn _mm256_mask_max_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_max_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let max = _mm256_max_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, max, src.as_i16x16())) @@ -1985,7 +2152,8 @@ pub fn _mm256_mask_max_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsw))] -pub fn _mm256_maskz_max_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_max_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let max = _mm256_max_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, max, i16x16::ZERO)) @@ -1999,7 +2167,8 @@ pub fn _mm256_maskz_max_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsw))] -pub fn _mm_mask_max_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_max_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let max = _mm_max_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, max, src.as_i16x8())) @@ -2013,7 +2182,8 @@ pub fn _mm_mask_max_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsw))] -pub fn _mm_maskz_max_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_max_epi16(k: 
__mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let max = _mm_max_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, max, i16x8::ZERO)) @@ -2027,7 +2197,8 @@ pub fn _mm_maskz_max_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsb))] -pub fn _mm512_max_epi8(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_max_epi8(a: __m512i, b: __m512i) -> __m512i { unsafe { simd_imax(a.as_i8x64(), b.as_i8x64()).as_m512i() } } @@ -2038,7 +2209,8 @@ pub fn _mm512_max_epi8(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsb))] -pub fn _mm512_mask_max_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_max_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let max = _mm512_max_epi8(a, b).as_i8x64(); transmute(simd_select_bitmask(k, max, src.as_i8x64())) @@ -2052,7 +2224,8 @@ pub fn _mm512_mask_max_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsb))] -pub fn _mm512_maskz_max_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_max_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let max = _mm512_max_epi8(a, b).as_i8x64(); transmute(simd_select_bitmask(k, max, i8x64::ZERO)) @@ -2066,7 +2239,8 @@ pub fn _mm512_maskz_max_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsb))] -pub fn _mm256_mask_max_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_max_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let max = _mm256_max_epi8(a, b).as_i8x32(); transmute(simd_select_bitmask(k, max, src.as_i8x32())) @@ -2080,7 +2254,8 @@ pub fn _mm256_mask_max_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsb))] -pub fn _mm256_maskz_max_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_max_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let max = _mm256_max_epi8(a, b).as_i8x32(); transmute(simd_select_bitmask(k, max, i8x32::ZERO)) @@ -2094,7 +2269,8 @@ pub fn _mm256_maskz_max_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsb))] -pub fn _mm_mask_max_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_max_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) 
-> __m128i { unsafe { let max = _mm_max_epi8(a, b).as_i8x16(); transmute(simd_select_bitmask(k, max, src.as_i8x16())) @@ -2108,7 +2284,8 @@ pub fn _mm_mask_max_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsb))] -pub fn _mm_maskz_max_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_max_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let max = _mm_max_epi8(a, b).as_i8x16(); transmute(simd_select_bitmask(k, max, i8x16::ZERO)) @@ -2122,7 +2299,8 @@ pub fn _mm_maskz_max_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminuw))] -pub fn _mm512_min_epu16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_min_epu16(a: __m512i, b: __m512i) -> __m512i { unsafe { simd_imin(a.as_u16x32(), b.as_u16x32()).as_m512i() } } @@ -2133,7 +2311,8 @@ pub fn _mm512_min_epu16(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminuw))] -pub fn _mm512_mask_min_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_min_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let min = _mm512_min_epu16(a, b).as_u16x32(); transmute(simd_select_bitmask(k, min, src.as_u16x32())) @@ -2147,7 +2326,8 @@ pub fn _mm512_mask_min_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminuw))] -pub fn _mm512_maskz_min_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_min_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let min = _mm512_min_epu16(a, b).as_u16x32(); transmute(simd_select_bitmask(k, min, u16x32::ZERO)) @@ -2161,7 +2341,8 @@ pub fn _mm512_maskz_min_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminuw))] -pub fn _mm256_mask_min_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_min_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let min = _mm256_min_epu16(a, b).as_u16x16(); transmute(simd_select_bitmask(k, min, src.as_u16x16())) @@ -2175,7 +2356,8 @@ pub fn _mm256_mask_min_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminuw))] -pub fn _mm256_maskz_min_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_min_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let min = 
_mm256_min_epu16(a, b).as_u16x16(); transmute(simd_select_bitmask(k, min, u16x16::ZERO)) @@ -2189,7 +2371,8 @@ pub fn _mm256_maskz_min_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminuw))] -pub fn _mm_mask_min_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_min_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let min = _mm_min_epu16(a, b).as_u16x8(); transmute(simd_select_bitmask(k, min, src.as_u16x8())) @@ -2203,7 +2386,8 @@ pub fn _mm_mask_min_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminuw))] -pub fn _mm_maskz_min_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_min_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let min = _mm_min_epu16(a, b).as_u16x8(); transmute(simd_select_bitmask(k, min, u16x8::ZERO)) @@ -2217,7 +2401,8 @@ pub fn _mm_maskz_min_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminub))] -pub fn _mm512_min_epu8(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_min_epu8(a: __m512i, b: __m512i) -> __m512i { unsafe { simd_imin(a.as_u8x64(), b.as_u8x64()).as_m512i() } } @@ -2228,7 +2413,8 @@ pub fn _mm512_min_epu8(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminub))] -pub fn _mm512_mask_min_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_min_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let min = _mm512_min_epu8(a, b).as_u8x64(); transmute(simd_select_bitmask(k, min, src.as_u8x64())) @@ -2242,7 +2428,8 @@ pub fn _mm512_mask_min_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminub))] -pub fn _mm512_maskz_min_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_min_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let min = _mm512_min_epu8(a, b).as_u8x64(); transmute(simd_select_bitmask(k, min, u8x64::ZERO)) @@ -2256,7 +2443,8 @@ pub fn _mm512_maskz_min_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminub))] -pub fn _mm256_mask_min_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_min_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let min = _mm256_min_epu8(a, b).as_u8x32(); 
transmute(simd_select_bitmask(k, min, src.as_u8x32())) @@ -2270,7 +2458,8 @@ pub fn _mm256_mask_min_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminub))] -pub fn _mm256_maskz_min_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_min_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let min = _mm256_min_epu8(a, b).as_u8x32(); transmute(simd_select_bitmask(k, min, u8x32::ZERO)) @@ -2284,7 +2473,8 @@ pub fn _mm256_maskz_min_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminub))] -pub fn _mm_mask_min_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_min_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let min = _mm_min_epu8(a, b).as_u8x16(); transmute(simd_select_bitmask(k, min, src.as_u8x16())) @@ -2298,7 +2488,8 @@ pub fn _mm_mask_min_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminub))] -pub fn _mm_maskz_min_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_min_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let min = _mm_min_epu8(a, b).as_u8x16(); transmute(simd_select_bitmask(k, min, u8x16::ZERO)) @@ -2312,7 +2503,8 @@ pub fn _mm_maskz_min_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsw))] -pub fn _mm512_min_epi16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_min_epi16(a: __m512i, b: __m512i) -> __m512i { unsafe { simd_imin(a.as_i16x32(), b.as_i16x32()).as_m512i() } } @@ -2323,7 +2515,8 @@ pub fn _mm512_min_epi16(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsw))] -pub fn _mm512_mask_min_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_min_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let min = _mm512_min_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, min, src.as_i16x32())) @@ -2337,7 +2530,8 @@ pub fn _mm512_mask_min_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsw))] -pub fn _mm512_maskz_min_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_min_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let min = _mm512_min_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, min, i16x32::ZERO)) 
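// --- Illustrative sketch, not part of this patch ------------------------------
// The hunks above turn the AVX-512BW element-wise min/max intrinsics into
// `const fn` under the unstable `stdarch_const_x86` gate, so a lane-wise
// maximum can in principle be folded at compile time. The snippet below is a
// hypothetical usage sketch only: it assumes the splat constructor
// `_mm512_set1_epi8` is also const under the same gate (not shown in these
// hunks), and that calling a `#[target_feature]` const fn from const context
// is accepted inside an `unsafe` block on the nightly this patch targets.

#![feature(stdarch_const_x86)]

#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;

#[cfg(target_arch = "x86_64")]
const MAX_LANES: __m512i = unsafe {
    // Every i8 lane evaluates to max(-5, 7) == 7 during const evaluation.
    _mm512_max_epi8(_mm512_set1_epi8(-5), _mm512_set1_epi8(7))
};
// -------------------------------------------------------------------------------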
@@ -2351,7 +2545,8 @@ pub fn _mm512_maskz_min_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsw))] -pub fn _mm256_mask_min_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_min_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let min = _mm256_min_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, min, src.as_i16x16())) @@ -2365,7 +2560,8 @@ pub fn _mm256_mask_min_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsw))] -pub fn _mm256_maskz_min_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_min_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let min = _mm256_min_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, min, i16x16::ZERO)) @@ -2379,7 +2575,8 @@ pub fn _mm256_maskz_min_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsw))] -pub fn _mm_mask_min_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_min_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let min = _mm_min_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, min, src.as_i16x8())) @@ -2393,7 +2590,8 @@ pub fn _mm_mask_min_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsw))] -pub fn _mm_maskz_min_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_min_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let min = _mm_min_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, min, i16x8::ZERO)) @@ -2407,7 +2605,8 @@ pub fn _mm_maskz_min_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsb))] -pub fn _mm512_min_epi8(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_min_epi8(a: __m512i, b: __m512i) -> __m512i { unsafe { simd_imin(a.as_i8x64(), b.as_i8x64()).as_m512i() } } @@ -2418,7 +2617,8 @@ pub fn _mm512_min_epi8(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsb))] -pub fn _mm512_mask_min_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_min_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let min = _mm512_min_epi8(a, b).as_i8x64(); transmute(simd_select_bitmask(k, min, src.as_i8x64())) @@ -2432,7 +2632,8 
@@ pub fn _mm512_mask_min_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsb))] -pub fn _mm512_maskz_min_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_min_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let min = _mm512_min_epi8(a, b).as_i8x64(); transmute(simd_select_bitmask(k, min, i8x64::ZERO)) @@ -2446,7 +2647,8 @@ pub fn _mm512_maskz_min_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsb))] -pub fn _mm256_mask_min_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_min_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let min = _mm256_min_epi8(a, b).as_i8x32(); transmute(simd_select_bitmask(k, min, src.as_i8x32())) @@ -2460,7 +2662,8 @@ pub fn _mm256_mask_min_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsb))] -pub fn _mm256_maskz_min_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_min_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let min = _mm256_min_epi8(a, b).as_i8x32(); transmute(simd_select_bitmask(k, min, i8x32::ZERO)) @@ -2474,7 +2677,8 @@ pub fn _mm256_maskz_min_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsb))] -pub fn _mm_mask_min_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_min_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let min = _mm_min_epi8(a, b).as_i8x16(); transmute(simd_select_bitmask(k, min, src.as_i8x16())) @@ -2488,7 +2692,8 @@ pub fn _mm_mask_min_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsb))] -pub fn _mm_maskz_min_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_min_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let min = _mm_min_epi8(a, b).as_i8x16(); transmute(simd_select_bitmask(k, min, i8x16::ZERO)) @@ -2502,7 +2707,8 @@ pub fn _mm_maskz_min_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmplt_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmplt_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { unsafe { simd_bitmask::(simd_lt(a.as_u16x32(), b.as_u16x32())) } } @@ -2513,7 +2719,8 @@ pub fn 
_mm512_cmplt_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmplt_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmplt_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_mask_cmp_epu16_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -2524,7 +2731,8 @@ pub fn _mm512_mask_cmplt_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmplt_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmplt_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { unsafe { simd_bitmask::(simd_lt(a.as_u16x16(), b.as_u16x16())) } } @@ -2535,7 +2743,8 @@ pub fn _mm256_cmplt_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmplt_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmplt_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_mask_cmp_epu16_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -2546,7 +2755,8 @@ pub fn _mm256_mask_cmplt_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmplt_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmplt_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_lt(a.as_u16x8(), b.as_u16x8())) } } @@ -2557,7 +2767,8 @@ pub fn _mm_cmplt_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmplt_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmplt_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epu16_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -2568,7 +2779,8 @@ pub fn _mm_mask_cmplt_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmplt_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmplt_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { unsafe { simd_bitmask::(simd_lt(a.as_u8x64(), b.as_u8x64())) } } @@ -2579,7 +2791,8 @@ pub fn _mm512_cmplt_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmplt_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = 
"stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmplt_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_mask_cmp_epu8_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -2590,7 +2803,8 @@ pub fn _mm512_mask_cmplt_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmplt_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmplt_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { unsafe { simd_bitmask::(simd_lt(a.as_u8x32(), b.as_u8x32())) } } @@ -2601,7 +2815,8 @@ pub fn _mm256_cmplt_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmplt_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmplt_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_mask_cmp_epu8_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -2612,7 +2827,8 @@ pub fn _mm256_mask_cmplt_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmplt_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmplt_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { unsafe { simd_bitmask::(simd_lt(a.as_u8x16(), b.as_u8x16())) } } @@ -2623,7 +2839,8 @@ pub fn _mm_cmplt_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmplt_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmplt_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_mask_cmp_epu8_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -2634,7 +2851,8 @@ pub fn _mm_mask_cmplt_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmplt_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmplt_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { unsafe { simd_bitmask::(simd_lt(a.as_i16x32(), b.as_i16x32())) } } @@ -2645,7 +2863,8 @@ pub fn _mm512_cmplt_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmplt_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmplt_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_mask_cmp_epi16_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -2656,7 +2875,8 @@ pub fn _mm512_mask_cmplt_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __ #[target_feature(enable = 
"avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmplt_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmplt_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { unsafe { simd_bitmask::(simd_lt(a.as_i16x16(), b.as_i16x16())) } } @@ -2667,7 +2887,8 @@ pub fn _mm256_cmplt_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmplt_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmplt_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_mask_cmp_epi16_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -2678,7 +2899,8 @@ pub fn _mm256_mask_cmplt_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmplt_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmplt_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_lt(a.as_i16x8(), b.as_i16x8())) } } @@ -2689,7 +2911,8 @@ pub fn _mm_cmplt_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmplt_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmplt_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epi16_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -2700,7 +2923,8 @@ pub fn _mm_mask_cmplt_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmplt_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmplt_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { unsafe { simd_bitmask::(simd_lt(a.as_i8x64(), b.as_i8x64())) } } @@ -2711,7 +2935,8 @@ pub fn _mm512_cmplt_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmplt_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmplt_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_mask_cmp_epi8_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -2722,7 +2947,8 @@ pub fn _mm512_mask_cmplt_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmplt_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmplt_epi8_mask(a: __m256i, b: __m256i) -> 
__mmask32 { unsafe { simd_bitmask::(simd_lt(a.as_i8x32(), b.as_i8x32())) } } @@ -2733,7 +2959,8 @@ pub fn _mm256_cmplt_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmplt_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmplt_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_mask_cmp_epi8_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -2744,7 +2971,8 @@ pub fn _mm256_mask_cmplt_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmplt_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmplt_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { unsafe { simd_bitmask::(simd_lt(a.as_i8x16(), b.as_i8x16())) } } @@ -2755,7 +2983,8 @@ pub fn _mm_cmplt_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmplt_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmplt_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_mask_cmp_epi8_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -2766,7 +2995,8 @@ pub fn _mm_mask_cmplt_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmpgt_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpgt_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { unsafe { simd_bitmask::(simd_gt(a.as_u16x32(), b.as_u16x32())) } } @@ -2777,7 +3007,8 @@ pub fn _mm512_cmpgt_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmpgt_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpgt_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_mask_cmp_epu16_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -2788,7 +3019,8 @@ pub fn _mm512_mask_cmpgt_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmpgt_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpgt_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { unsafe { simd_bitmask::(simd_gt(a.as_u16x16(), b.as_u16x16())) } } @@ -2799,7 +3031,8 @@ pub fn _mm256_cmpgt_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn 
_mm256_mask_cmpgt_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpgt_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_mask_cmp_epu16_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -2810,7 +3043,8 @@ pub fn _mm256_mask_cmpgt_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmpgt_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpgt_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_gt(a.as_u16x8(), b.as_u16x8())) } } @@ -2821,7 +3055,8 @@ pub fn _mm_cmpgt_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmpgt_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpgt_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epu16_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -2832,7 +3067,8 @@ pub fn _mm_mask_cmpgt_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmpgt_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpgt_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { unsafe { simd_bitmask::(simd_gt(a.as_u8x64(), b.as_u8x64())) } } @@ -2843,7 +3079,8 @@ pub fn _mm512_cmpgt_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmpgt_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpgt_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_mask_cmp_epu8_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -2854,7 +3091,8 @@ pub fn _mm512_mask_cmpgt_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmpgt_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpgt_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { unsafe { simd_bitmask::(simd_gt(a.as_u8x32(), b.as_u8x32())) } } @@ -2865,7 +3103,8 @@ pub fn _mm256_cmpgt_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmpgt_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpgt_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_mask_cmp_epu8_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -2876,7 +3115,8 @@ pub 
fn _mm256_mask_cmpgt_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmpgt_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpgt_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { unsafe { simd_bitmask::(simd_gt(a.as_u8x16(), b.as_u8x16())) } } @@ -2887,7 +3127,8 @@ pub fn _mm_cmpgt_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmpgt_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpgt_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_mask_cmp_epu8_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -2898,7 +3139,8 @@ pub fn _mm_mask_cmpgt_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmpgt_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpgt_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { unsafe { simd_bitmask::(simd_gt(a.as_i16x32(), b.as_i16x32())) } } @@ -2909,7 +3151,8 @@ pub fn _mm512_cmpgt_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmpgt_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpgt_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_mask_cmp_epi16_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -2920,7 +3163,8 @@ pub fn _mm512_mask_cmpgt_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmpgt_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpgt_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { unsafe { simd_bitmask::(simd_gt(a.as_i16x16(), b.as_i16x16())) } } @@ -2931,7 +3175,8 @@ pub fn _mm256_cmpgt_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmpgt_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpgt_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_mask_cmp_epi16_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -2942,7 +3187,8 @@ pub fn _mm256_mask_cmpgt_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmpgt_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpgt_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_gt(a.as_i16x8(), b.as_i16x8())) } } @@ -2953,7 +3199,8 @@ pub fn _mm_cmpgt_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmpgt_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpgt_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epi16_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -2964,7 +3211,8 @@ pub fn _mm_mask_cmpgt_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmpgt_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpgt_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { unsafe { simd_bitmask::(simd_gt(a.as_i8x64(), b.as_i8x64())) } } @@ -2975,7 +3223,8 @@ pub fn _mm512_cmpgt_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmpgt_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpgt_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_mask_cmp_epi8_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -2986,7 +3235,8 @@ pub fn _mm512_mask_cmpgt_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmpgt_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpgt_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { unsafe { simd_bitmask::(simd_gt(a.as_i8x32(), b.as_i8x32())) } } @@ -2997,7 +3247,8 @@ pub fn _mm256_cmpgt_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmpgt_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpgt_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_mask_cmp_epi8_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -3008,7 +3259,8 @@ pub fn _mm256_mask_cmpgt_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmpgt_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpgt_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { unsafe { simd_bitmask::(simd_gt(a.as_i8x16(), b.as_i8x16())) } } @@ -3019,7 +3271,8 @@ pub fn _mm_cmpgt_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { #[target_feature(enable = 
"avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmpgt_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpgt_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_mask_cmp_epi8_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -3030,7 +3283,8 @@ pub fn _mm_mask_cmpgt_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmple_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmple_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { unsafe { simd_bitmask::(simd_le(a.as_u16x32(), b.as_u16x32())) } } @@ -3041,7 +3295,8 @@ pub fn _mm512_cmple_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmple_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmple_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_mask_cmp_epu16_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -3052,7 +3307,8 @@ pub fn _mm512_mask_cmple_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmple_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmple_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { unsafe { simd_bitmask::(simd_le(a.as_u16x16(), b.as_u16x16())) } } @@ -3063,7 +3319,8 @@ pub fn _mm256_cmple_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmple_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmple_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_mask_cmp_epu16_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -3074,7 +3331,8 @@ pub fn _mm256_mask_cmple_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmple_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmple_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_le(a.as_u16x8(), b.as_u16x8())) } } @@ -3085,7 +3343,8 @@ pub fn _mm_cmple_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmple_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn 
_mm_mask_cmple_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epu16_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -3096,7 +3355,8 @@ pub fn _mm_mask_cmple_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmple_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmple_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { unsafe { simd_bitmask::(simd_le(a.as_u8x64(), b.as_u8x64())) } } @@ -3107,7 +3367,8 @@ pub fn _mm512_cmple_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmple_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmple_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_mask_cmp_epu8_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -3118,7 +3379,8 @@ pub fn _mm512_mask_cmple_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmple_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmple_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { unsafe { simd_bitmask::(simd_le(a.as_u8x32(), b.as_u8x32())) } } @@ -3129,7 +3391,8 @@ pub fn _mm256_cmple_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmple_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmple_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_mask_cmp_epu8_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -3140,7 +3403,8 @@ pub fn _mm256_mask_cmple_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmple_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmple_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { unsafe { simd_bitmask::(simd_le(a.as_u8x16(), b.as_u8x16())) } } @@ -3151,7 +3415,8 @@ pub fn _mm_cmple_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmple_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmple_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_mask_cmp_epu8_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -3162,7 +3427,8 @@ pub fn _mm_mask_cmple_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] 
#[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmple_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmple_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { unsafe { simd_bitmask::(simd_le(a.as_i16x32(), b.as_i16x32())) } } @@ -3173,7 +3439,8 @@ pub fn _mm512_cmple_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmple_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmple_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_mask_cmp_epi16_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -3184,7 +3451,8 @@ pub fn _mm512_mask_cmple_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmple_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmple_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { unsafe { simd_bitmask::(simd_le(a.as_i16x16(), b.as_i16x16())) } } @@ -3195,7 +3463,8 @@ pub fn _mm256_cmple_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmple_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmple_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_mask_cmp_epi16_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -3206,7 +3475,8 @@ pub fn _mm256_mask_cmple_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmple_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmple_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_le(a.as_i16x8(), b.as_i16x8())) } } @@ -3217,7 +3487,8 @@ pub fn _mm_cmple_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmple_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmple_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epi16_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -3228,7 +3499,8 @@ pub fn _mm_mask_cmple_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmple_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmple_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { unsafe { simd_bitmask::(simd_le(a.as_i8x64(), b.as_i8x64())) } } 
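For illustration only (user-side code, not part of the patch): the cmpgt/cmple shortcuts in the hunks above are the generic compare intrinsics further down in this diff with a fixed _MM_CMPINT_* predicate, and the mask_ variants AND the lane mask with k1. With this patch the same calls can additionally be evaluated in const contexts on nightly under the stdarch_const_x86 gate named in the new attributes.

// Runtime sketch of the two relationships; every intrinsic and constant used
// here already exists in core::arch::x86_64.
#[cfg(target_arch = "x86_64")]
fn cmpgt_mask_demo() {
    use core::arch::x86_64::*;
    if is_x86_feature_detected!("avx512bw") && is_x86_feature_detected!("avx512vl") {
        // SAFETY: the required target features were detected above.
        unsafe {
            let a = _mm_set1_epi16(2);
            let b = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
            // `cmpgt` is the generic compare with the "not less or equal" predicate.
            assert_eq!(
                _mm_cmpgt_epi16_mask(a, b),
                _mm_cmp_epi16_mask::<_MM_CMPINT_NLE>(a, b)
            );
            // The masked form only reports lanes whose bit is already set in `k1`.
            let k1: __mmask8 = 0b0101_0101;
            assert_eq!(
                _mm_mask_cmpgt_epi16_mask(k1, a, b),
                k1 & _mm_cmpgt_epi16_mask(a, b)
            );
        }
    }
}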
@@ -3239,7 +3511,8 @@ pub fn _mm512_cmple_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmple_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmple_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_mask_cmp_epi8_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -3250,7 +3523,8 @@ pub fn _mm512_mask_cmple_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmple_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmple_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { unsafe { simd_bitmask::(simd_le(a.as_i8x32(), b.as_i8x32())) } } @@ -3261,7 +3535,8 @@ pub fn _mm256_cmple_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmple_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmple_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_mask_cmp_epi8_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -3272,7 +3547,8 @@ pub fn _mm256_mask_cmple_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmple_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmple_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { unsafe { simd_bitmask::(simd_le(a.as_i8x16(), b.as_i8x16())) } } @@ -3283,7 +3559,8 @@ pub fn _mm_cmple_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmple_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmple_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_mask_cmp_epi8_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -3294,7 +3571,8 @@ pub fn _mm_mask_cmple_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmpge_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpge_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { unsafe { simd_bitmask::(simd_ge(a.as_u16x32(), b.as_u16x32())) } } @@ -3305,7 +3583,8 @@ pub fn _mm512_cmpge_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmpge_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpge_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_mask_cmp_epu16_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -3316,7 +3595,8 @@ pub fn _mm512_mask_cmpge_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmpge_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpge_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { unsafe { simd_bitmask::(simd_ge(a.as_u16x16(), b.as_u16x16())) } } @@ -3327,7 +3607,8 @@ pub fn _mm256_cmpge_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmpge_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpge_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_mask_cmp_epu16_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -3338,7 +3619,8 @@ pub fn _mm256_mask_cmpge_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmpge_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpge_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_ge(a.as_u16x8(), b.as_u16x8())) } } @@ -3349,7 +3631,8 @@ pub fn _mm_cmpge_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmpge_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpge_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epu16_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -3360,7 +3643,8 @@ pub fn _mm_mask_cmpge_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmpge_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpge_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { unsafe { simd_bitmask::(simd_ge(a.as_u8x64(), b.as_u8x64())) } } @@ -3371,7 +3655,8 @@ pub fn _mm512_cmpge_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmpge_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpge_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_mask_cmp_epu8_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -3382,7 +3667,8 @@ pub fn _mm512_mask_cmpge_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> 
__m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmpge_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpge_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { unsafe { simd_bitmask::(simd_ge(a.as_u8x32(), b.as_u8x32())) } } @@ -3393,7 +3679,8 @@ pub fn _mm256_cmpge_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmpge_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpge_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_mask_cmp_epu8_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -3404,7 +3691,8 @@ pub fn _mm256_mask_cmpge_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmpge_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpge_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { unsafe { simd_bitmask::(simd_ge(a.as_u8x16(), b.as_u8x16())) } } @@ -3415,7 +3703,8 @@ pub fn _mm_cmpge_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmpge_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpge_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_mask_cmp_epu8_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -3426,7 +3715,8 @@ pub fn _mm_mask_cmpge_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmpge_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpge_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { unsafe { simd_bitmask::(simd_ge(a.as_i16x32(), b.as_i16x32())) } } @@ -3437,7 +3727,8 @@ pub fn _mm512_cmpge_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmpge_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpge_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_mask_cmp_epi16_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -3448,7 +3739,8 @@ pub fn _mm512_mask_cmpge_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmpge_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn 
_mm256_cmpge_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { unsafe { simd_bitmask::(simd_ge(a.as_i16x16(), b.as_i16x16())) } } @@ -3459,7 +3751,8 @@ pub fn _mm256_cmpge_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmpge_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpge_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_mask_cmp_epi16_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -3470,7 +3763,8 @@ pub fn _mm256_mask_cmpge_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmpge_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpge_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_ge(a.as_i16x8(), b.as_i16x8())) } } @@ -3481,7 +3775,8 @@ pub fn _mm_cmpge_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmpge_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpge_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epi16_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -3492,7 +3787,8 @@ pub fn _mm_mask_cmpge_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmpge_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpge_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { unsafe { simd_bitmask::(simd_ge(a.as_i8x64(), b.as_i8x64())) } } @@ -3503,7 +3799,8 @@ pub fn _mm512_cmpge_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmpge_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpge_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_mask_cmp_epi8_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -3514,7 +3811,8 @@ pub fn _mm512_mask_cmpge_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmpge_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpge_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { unsafe { simd_bitmask::(simd_ge(a.as_i8x32(), b.as_i8x32())) } } @@ -3525,7 +3823,8 @@ pub fn _mm256_cmpge_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] 
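The epu comparisons just above treat each lane as unsigned, while the epi ones compare the same bits as signed, and bit i of the returned mask corresponds to lane i. A minimal scalar model of _mm_cmpge_epu8_mask, with a made-up name and purely for illustration:

// Bit i is set when a[i] >= b[i] as an unsigned byte; the epi8 variant would
// compare the same bytes as i8, so a 0xFF lane (255 vs. -1) flips the outcome.
const fn cmpge_epu8_mask_model(a: [u8; 16], b: [u8; 16]) -> u16 {
    let mut k: u16 = 0;
    let mut i = 0;
    while i < 16 {
        if a[i] >= b[i] {
            k |= 1u16 << i;
        }
        i += 1;
    }
    k
}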
#[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmpge_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpge_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_mask_cmp_epi8_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -3536,7 +3835,8 @@ pub fn _mm256_mask_cmpge_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmpge_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpge_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { unsafe { simd_bitmask::(simd_ge(a.as_i8x16(), b.as_i8x16())) } } @@ -3547,7 +3847,8 @@ pub fn _mm_cmpge_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmpge_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpge_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_mask_cmp_epi8_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -3558,7 +3859,8 @@ pub fn _mm_mask_cmpge_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmpeq_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpeq_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { unsafe { simd_bitmask::(simd_eq(a.as_u16x32(), b.as_u16x32())) } } @@ -3569,7 +3871,8 @@ pub fn _mm512_cmpeq_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmpeq_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpeq_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_mask_cmp_epu16_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -3580,7 +3883,8 @@ pub fn _mm512_mask_cmpeq_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmpeq_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpeq_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { unsafe { simd_bitmask::(simd_eq(a.as_u16x16(), b.as_u16x16())) } } @@ -3591,7 +3895,8 @@ pub fn _mm256_cmpeq_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmpeq_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpeq_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { 
_mm256_mask_cmp_epu16_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -3602,7 +3907,8 @@ pub fn _mm256_mask_cmpeq_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmpeq_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpeq_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_eq(a.as_u16x8(), b.as_u16x8())) } } @@ -3613,7 +3919,8 @@ pub fn _mm_cmpeq_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmpeq_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpeq_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epu16_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -3624,7 +3931,8 @@ pub fn _mm_mask_cmpeq_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmpeq_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpeq_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { unsafe { simd_bitmask::(simd_eq(a.as_u8x64(), b.as_u8x64())) } } @@ -3635,7 +3943,8 @@ pub fn _mm512_cmpeq_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmpeq_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpeq_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_mask_cmp_epu8_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -3646,7 +3955,8 @@ pub fn _mm512_mask_cmpeq_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmpeq_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpeq_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { unsafe { simd_bitmask::(simd_eq(a.as_u8x32(), b.as_u8x32())) } } @@ -3657,7 +3967,8 @@ pub fn _mm256_cmpeq_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmpeq_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpeq_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_mask_cmp_epu8_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -3668,7 +3979,8 @@ pub fn _mm256_mask_cmpeq_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmpeq_epu8_mask(a: 
__m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpeq_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { unsafe { simd_bitmask::(simd_eq(a.as_u8x16(), b.as_u8x16())) } } @@ -3679,7 +3991,8 @@ pub fn _mm_cmpeq_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmpeq_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpeq_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_mask_cmp_epu8_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -3690,7 +4003,8 @@ pub fn _mm_mask_cmpeq_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmpeq_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpeq_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { unsafe { simd_bitmask::(simd_eq(a.as_i16x32(), b.as_i16x32())) } } @@ -3701,7 +4015,8 @@ pub fn _mm512_cmpeq_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmpeq_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpeq_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_mask_cmp_epi16_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -3712,7 +4027,8 @@ pub fn _mm512_mask_cmpeq_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmpeq_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpeq_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { unsafe { simd_bitmask::(simd_eq(a.as_i16x16(), b.as_i16x16())) } } @@ -3723,7 +4039,8 @@ pub fn _mm256_cmpeq_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmpeq_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpeq_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_mask_cmp_epi16_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -3734,7 +4051,8 @@ pub fn _mm256_mask_cmpeq_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmpeq_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpeq_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_eq(a.as_i16x8(), b.as_i16x8())) } } @@ -3745,7 +4063,8 @@ pub fn _mm_cmpeq_epi16_mask(a: __m128i, b: __m128i) 
-> __mmask8 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmpeq_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpeq_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epi16_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -3756,7 +4075,8 @@ pub fn _mm_mask_cmpeq_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmpeq_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpeq_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { unsafe { simd_bitmask::(simd_eq(a.as_i8x64(), b.as_i8x64())) } } @@ -3767,7 +4087,8 @@ pub fn _mm512_cmpeq_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmpeq_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpeq_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_mask_cmp_epi8_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -3778,7 +4099,8 @@ pub fn _mm512_mask_cmpeq_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmpeq_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpeq_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { unsafe { simd_bitmask::(simd_eq(a.as_i8x32(), b.as_i8x32())) } } @@ -3789,7 +4111,8 @@ pub fn _mm256_cmpeq_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmpeq_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpeq_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_mask_cmp_epi8_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -3800,7 +4123,8 @@ pub fn _mm256_mask_cmpeq_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmpeq_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpeq_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { unsafe { simd_bitmask::(simd_eq(a.as_i8x16(), b.as_i8x16())) } } @@ -3811,7 +4135,8 @@ pub fn _mm_cmpeq_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmpeq_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn 
_mm_mask_cmpeq_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_mask_cmp_epi8_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -3822,7 +4147,8 @@ pub fn _mm_mask_cmpeq_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmpneq_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpneq_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { unsafe { simd_bitmask::(simd_ne(a.as_u16x32(), b.as_u16x32())) } } @@ -3833,7 +4159,8 @@ pub fn _mm512_cmpneq_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmpneq_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpneq_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_mask_cmp_epu16_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -3844,7 +4171,8 @@ pub fn _mm512_mask_cmpneq_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> _ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmpneq_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpneq_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { unsafe { simd_bitmask::(simd_ne(a.as_u16x16(), b.as_u16x16())) } } @@ -3855,7 +4183,8 @@ pub fn _mm256_cmpneq_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmpneq_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpneq_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_mask_cmp_epu16_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -3866,7 +4195,8 @@ pub fn _mm256_mask_cmpneq_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> _ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmpneq_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpneq_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_ne(a.as_u16x8(), b.as_u16x8())) } } @@ -3877,7 +4207,8 @@ pub fn _mm_cmpneq_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmpneq_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpneq_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epu16_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -3888,7 +4219,8 @@ pub fn _mm_mask_cmpneq_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mma #[target_feature(enable = "avx512bw")] #[stable(feature = 
"stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmpneq_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpneq_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { unsafe { simd_bitmask::(simd_ne(a.as_u8x64(), b.as_u8x64())) } } @@ -3899,7 +4231,8 @@ pub fn _mm512_cmpneq_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmpneq_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpneq_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_mask_cmp_epu8_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -3910,7 +4243,8 @@ pub fn _mm512_mask_cmpneq_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmpneq_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpneq_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { unsafe { simd_bitmask::(simd_ne(a.as_u8x32(), b.as_u8x32())) } } @@ -3921,7 +4255,8 @@ pub fn _mm256_cmpneq_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmpneq_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpneq_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_mask_cmp_epu8_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -3932,7 +4267,8 @@ pub fn _mm256_mask_cmpneq_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmpneq_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpneq_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { unsafe { simd_bitmask::(simd_ne(a.as_u8x16(), b.as_u8x16())) } } @@ -3943,7 +4279,8 @@ pub fn _mm_cmpneq_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmpneq_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpneq_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_mask_cmp_epu8_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -3954,7 +4291,8 @@ pub fn _mm_mask_cmpneq_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mma #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmpneq_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpneq_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { unsafe { 
simd_bitmask::(simd_ne(a.as_i16x32(), b.as_i16x32())) } } @@ -3965,7 +4303,8 @@ pub fn _mm512_cmpneq_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_mask_cmpneq_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpneq_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_mask_cmp_epi16_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -3976,7 +4315,8 @@ pub fn _mm512_mask_cmpneq_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> _ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmpneq_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpneq_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { unsafe { simd_bitmask::(simd_ne(a.as_i16x16(), b.as_i16x16())) } } @@ -3987,7 +4327,8 @@ pub fn _mm256_cmpneq_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmpneq_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpneq_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_mask_cmp_epi16_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -3998,7 +4339,8 @@ pub fn _mm256_mask_cmpneq_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> _ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmpneq_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpneq_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_ne(a.as_i16x8(), b.as_i16x8())) } } @@ -4009,7 +4351,8 @@ pub fn _mm_cmpneq_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmpneq_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpneq_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epi16_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -4020,7 +4363,8 @@ pub fn _mm_mask_cmpneq_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mma #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm512_cmpneq_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpneq_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { unsafe { simd_bitmask::(simd_ne(a.as_i8x64(), b.as_i8x64())) } } @@ -4031,7 +4375,8 @@ pub fn _mm512_cmpneq_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn 
_mm512_mask_cmpneq_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpneq_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_mask_cmp_epi8_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -4042,7 +4387,8 @@ pub fn _mm512_mask_cmpneq_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_cmpneq_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpneq_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { unsafe { simd_bitmask::(simd_ne(a.as_i8x32(), b.as_i8x32())) } } @@ -4053,7 +4399,8 @@ pub fn _mm256_cmpneq_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm256_mask_cmpneq_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpneq_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_mask_cmp_epi8_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -4064,7 +4411,8 @@ pub fn _mm256_mask_cmpneq_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_cmpneq_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpneq_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { unsafe { simd_bitmask::(simd_ne(a.as_i8x16(), b.as_i8x16())) } } @@ -4075,7 +4423,8 @@ pub fn _mm_cmpneq_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] -pub fn _mm_mask_cmpneq_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpneq_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_mask_cmp_epi8_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -4087,7 +4436,8 @@ pub fn _mm_mask_cmpneq_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mma #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm512_cmp_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmp_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { unsafe { static_assert_uimm_bits!(IMM8, 3); let a = a.as_u16x32(); @@ -4114,7 +4464,8 @@ pub fn _mm512_cmp_epu16_mask(a: __m512i, b: __m512i) -> __mmask #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm512_mask_cmp_epu16_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmp_epu16_mask( k1: __mmask32, a: __m512i, b: __m512i, @@ -4146,7 +4497,8 @@ pub fn _mm512_mask_cmp_epu16_mask( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] 
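The generic _mm*_cmp_ep*_mask intrinsics starting here take a 3-bit IMM8 predicate (hence static_assert_uimm_bits!(IMM8, 3) in their bodies); the elided bodies presumably dispatch on that predicate per lane, and the mask_ forms AND the result with k1. A per-lane scalar model, illustrative only:

// The values follow Intel's _MM_CMPINT_ENUM encoding, which is also what the
// shortcut intrinsics earlier in this diff pass as the const generic.
const fn cmp_lane_model(imm8: i32, a: i16, b: i16) -> bool {
    match imm8 & 0b111 {
        0 => a == b, // _MM_CMPINT_EQ
        1 => a < b,  // _MM_CMPINT_LT
        2 => a <= b, // _MM_CMPINT_LE
        3 => false,  // _MM_CMPINT_FALSE
        4 => a != b, // _MM_CMPINT_NE
        5 => a >= b, // _MM_CMPINT_NLT (why cmpge lowers to NLT)
        6 => a > b,  // _MM_CMPINT_NLE (and cmpgt to NLE)
        _ => true,   // _MM_CMPINT_TRUE
    }
}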
#[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm256_cmp_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmp_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { unsafe { static_assert_uimm_bits!(IMM8, 3); let a = a.as_u16x16(); @@ -4173,7 +4525,8 @@ pub fn _mm256_cmp_epu16_mask(a: __m256i, b: __m256i) -> __mmask #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm256_mask_cmp_epu16_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmp_epu16_mask( k1: __mmask16, a: __m256i, b: __m256i, @@ -4205,7 +4558,8 @@ pub fn _mm256_mask_cmp_epu16_mask( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm_cmp_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmp_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { static_assert_uimm_bits!(IMM8, 3); let a = a.as_u16x8(); @@ -4232,7 +4586,12 @@ pub fn _mm_cmp_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm_mask_cmp_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmp_epu16_mask( + k1: __mmask8, + a: __m128i, + b: __m128i, +) -> __mmask8 { unsafe { static_assert_uimm_bits!(IMM8, 3); let a = a.as_u16x8(); @@ -4260,7 +4619,8 @@ pub fn _mm_mask_cmp_epu16_mask(k1: __mmask8, a: __m128i, b: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm512_cmp_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmp_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { unsafe { static_assert_uimm_bits!(IMM8, 3); let a = a.as_u8x64(); @@ -4287,7 +4647,8 @@ pub fn _mm512_cmp_epu8_mask(a: __m512i, b: __m512i) -> __mmask6 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm512_mask_cmp_epu8_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmp_epu8_mask( k1: __mmask64, a: __m512i, b: __m512i, @@ -4319,7 +4680,8 @@ pub fn _mm512_mask_cmp_epu8_mask( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm256_cmp_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmp_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { unsafe { static_assert_uimm_bits!(IMM8, 3); let a = a.as_u8x32(); @@ -4346,7 +4708,8 @@ pub fn _mm256_cmp_epu8_mask(a: __m256i, b: __m256i) -> __mmask3 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm256_mask_cmp_epu8_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] 
+pub const fn _mm256_mask_cmp_epu8_mask( k1: __mmask32, a: __m256i, b: __m256i, @@ -4378,7 +4741,8 @@ pub fn _mm256_mask_cmp_epu8_mask( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm_cmp_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmp_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { unsafe { static_assert_uimm_bits!(IMM8, 3); let a = a.as_u8x16(); @@ -4405,7 +4769,12 @@ pub fn _mm_cmp_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm_mask_cmp_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmp_epu8_mask( + k1: __mmask16, + a: __m128i, + b: __m128i, +) -> __mmask16 { unsafe { static_assert_uimm_bits!(IMM8, 3); let a = a.as_u8x16(); @@ -4433,7 +4802,8 @@ pub fn _mm_mask_cmp_epu8_mask(k1: __mmask16, a: __m128i, b: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm512_cmp_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmp_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { unsafe { static_assert_uimm_bits!(IMM8, 3); let a = a.as_i16x32(); @@ -4460,7 +4830,8 @@ pub fn _mm512_cmp_epi16_mask(a: __m512i, b: __m512i) -> __mmask #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm512_mask_cmp_epi16_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmp_epi16_mask( k1: __mmask32, a: __m512i, b: __m512i, @@ -4492,7 +4863,8 @@ pub fn _mm512_mask_cmp_epi16_mask( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm256_cmp_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmp_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { unsafe { static_assert_uimm_bits!(IMM8, 3); let a = a.as_i16x16(); @@ -4519,7 +4891,8 @@ pub fn _mm256_cmp_epi16_mask(a: __m256i, b: __m256i) -> __mmask #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm256_mask_cmp_epi16_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmp_epi16_mask( k1: __mmask16, a: __m256i, b: __m256i, @@ -4551,7 +4924,8 @@ pub fn _mm256_mask_cmp_epi16_mask( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm_cmp_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmp_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { static_assert_uimm_bits!(IMM8, 3); let a = a.as_i16x8(); @@ -4578,7 +4952,12 @@ pub fn _mm_cmp_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { #[stable(feature = "stdarch_x86_avx512", since = 
"1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm_mask_cmp_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmp_epi16_mask( + k1: __mmask8, + a: __m128i, + b: __m128i, +) -> __mmask8 { unsafe { static_assert_uimm_bits!(IMM8, 3); let a = a.as_i16x8(); @@ -4606,7 +4985,8 @@ pub fn _mm_mask_cmp_epi16_mask(k1: __mmask8, a: __m128i, b: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm512_cmp_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmp_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { unsafe { static_assert_uimm_bits!(IMM8, 3); let a = a.as_i8x64(); @@ -4633,7 +5013,8 @@ pub fn _mm512_cmp_epi8_mask(a: __m512i, b: __m512i) -> __mmask6 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm512_mask_cmp_epi8_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmp_epi8_mask( k1: __mmask64, a: __m512i, b: __m512i, @@ -4665,7 +5046,8 @@ pub fn _mm512_mask_cmp_epi8_mask( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm256_cmp_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmp_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { unsafe { static_assert_uimm_bits!(IMM8, 3); let a = a.as_i8x32(); @@ -4692,7 +5074,8 @@ pub fn _mm256_cmp_epi8_mask(a: __m256i, b: __m256i) -> __mmask3 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm256_mask_cmp_epi8_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmp_epi8_mask( k1: __mmask32, a: __m256i, b: __m256i, @@ -4724,7 +5107,8 @@ pub fn _mm256_mask_cmp_epi8_mask( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm_cmp_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmp_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { unsafe { static_assert_uimm_bits!(IMM8, 3); let a = a.as_i8x16(); @@ -4751,7 +5135,12 @@ pub fn _mm_cmp_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] -pub fn _mm_mask_cmp_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmp_epi8_mask( + k1: __mmask16, + a: __m128i, + b: __m128i, +) -> __mmask16 { unsafe { static_assert_uimm_bits!(IMM8, 3); let a = a.as_i8x16(); @@ -4777,8 +5166,9 @@ pub fn _mm_mask_cmp_epi8_mask(k1: __mmask16, a: __m128i, b: __m #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_reduce_add_epi16(a: __m256i) -> i16 { - unsafe { 
simd_reduce_add_unordered(a.as_i16x16()) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_reduce_add_epi16(a: __m256i) -> i16 { + unsafe { simd_reduce_add_ordered(a.as_i16x16(), 0) } } /// Reduce the packed 16-bit integers in a by addition using mask k. Returns the sum of all active elements in a. @@ -4787,8 +5177,9 @@ pub fn _mm256_reduce_add_epi16(a: __m256i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_reduce_add_epi16(k: __mmask16, a: __m256i) -> i16 { - unsafe { simd_reduce_add_unordered(simd_select_bitmask(k, a.as_i16x16(), i16x16::ZERO)) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_reduce_add_epi16(k: __mmask16, a: __m256i) -> i16 { + unsafe { simd_reduce_add_ordered(simd_select_bitmask(k, a.as_i16x16(), i16x16::ZERO), 0) } } /// Reduce the packed 16-bit integers in a by addition. Returns the sum of all elements in a. @@ -4797,8 +5188,9 @@ pub fn _mm256_mask_reduce_add_epi16(k: __mmask16, a: __m256i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_reduce_add_epi16(a: __m128i) -> i16 { - unsafe { simd_reduce_add_unordered(a.as_i16x8()) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_reduce_add_epi16(a: __m128i) -> i16 { + unsafe { simd_reduce_add_ordered(a.as_i16x8(), 0) } } /// Reduce the packed 16-bit integers in a by addition using mask k. Returns the sum of all active elements in a. @@ -4807,8 +5199,9 @@ pub fn _mm_reduce_add_epi16(a: __m128i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_reduce_add_epi16(k: __mmask8, a: __m128i) -> i16 { - unsafe { simd_reduce_add_unordered(simd_select_bitmask(k, a.as_i16x8(), i16x8::ZERO)) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_reduce_add_epi16(k: __mmask8, a: __m128i) -> i16 { + unsafe { simd_reduce_add_ordered(simd_select_bitmask(k, a.as_i16x8(), i16x8::ZERO), 0) } } /// Reduce the packed 8-bit integers in a by addition. Returns the sum of all elements in a. @@ -4817,8 +5210,9 @@ pub fn _mm_mask_reduce_add_epi16(k: __mmask8, a: __m128i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_reduce_add_epi8(a: __m256i) -> i8 { - unsafe { simd_reduce_add_unordered(a.as_i8x32()) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_reduce_add_epi8(a: __m256i) -> i8 { + unsafe { simd_reduce_add_ordered(a.as_i8x32(), 0) } } /// Reduce the packed 8-bit integers in a by addition using mask k. Returns the sum of all active elements in a. 
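The add reductions above switch from simd_reduce_add_unordered to simd_reduce_add_ordered with an explicit 0 accumulator: for wrapping integer addition the result is the same either way, but the ordered form gives const evaluation a single, deterministic folding order. A minimal scalar sketch of what an ordered add-reduction with an initial accumulator does (the helper name sum_ordered is illustrative, not part of the crate):

// Scalar model of simd_reduce_add_ordered(v, 0) over one 16-lane vector.
const fn sum_ordered(v: &[i16; 16], init: i16) -> i16 {
    let mut acc = init;
    let mut i = 0;
    while i < v.len() {
        acc = acc.wrapping_add(v[i]); // lanes are folded left to right
        i += 1;
    }
    acc
}

// Usable in a const context once the intrinsic itself is const.
const SUM: i16 = sum_ordered(&[1; 16], 0);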
@@ -4827,8 +5221,9 @@ pub fn _mm256_reduce_add_epi8(a: __m256i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_reduce_add_epi8(k: __mmask32, a: __m256i) -> i8 { - unsafe { simd_reduce_add_unordered(simd_select_bitmask(k, a.as_i8x32(), i8x32::ZERO)) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_reduce_add_epi8(k: __mmask32, a: __m256i) -> i8 { + unsafe { simd_reduce_add_ordered(simd_select_bitmask(k, a.as_i8x32(), i8x32::ZERO), 0) } } /// Reduce the packed 8-bit integers in a by addition. Returns the sum of all elements in a. @@ -4837,8 +5232,9 @@ pub fn _mm256_mask_reduce_add_epi8(k: __mmask32, a: __m256i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_reduce_add_epi8(a: __m128i) -> i8 { - unsafe { simd_reduce_add_unordered(a.as_i8x16()) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_reduce_add_epi8(a: __m128i) -> i8 { + unsafe { simd_reduce_add_ordered(a.as_i8x16(), 0) } } /// Reduce the packed 8-bit integers in a by addition using mask k. Returns the sum of all active elements in a. @@ -4847,8 +5243,9 @@ pub fn _mm_reduce_add_epi8(a: __m128i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_reduce_add_epi8(k: __mmask16, a: __m128i) -> i8 { - unsafe { simd_reduce_add_unordered(simd_select_bitmask(k, a.as_i8x16(), i8x16::ZERO)) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_reduce_add_epi8(k: __mmask16, a: __m128i) -> i8 { + unsafe { simd_reduce_add_ordered(simd_select_bitmask(k, a.as_i8x16(), i8x16::ZERO), 0) } } /// Reduce the packed 16-bit integers in a by bitwise AND. Returns the bitwise AND of all elements in a. 
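In the masked variants, simd_select_bitmask first replaces every inactive lane with a value that cannot affect the reduction (here the additive identity 0), and only then the reduction runs. A scalar sketch of that selection step, assuming bit i of the mask controls lane i (all names below are illustrative):

// Scalar model of simd_select_bitmask(k, active, inactive) for 16 lanes.
const fn select_lanes(mask: u16, active: [i8; 16], inactive: [i8; 16]) -> [i8; 16] {
    let mut out = [0i8; 16];
    let mut i = 0;
    while i < 16 {
        // Lane i comes from `active` when bit i of the mask is set.
        out[i] = if (mask >> i) & 1 == 1 { active[i] } else { inactive[i] };
        i += 1;
    }
    out
}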
@@ -4857,7 +5254,8 @@ pub fn _mm_mask_reduce_add_epi8(k: __mmask16, a: __m128i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_reduce_and_epi16(a: __m256i) -> i16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_reduce_and_epi16(a: __m256i) -> i16 { unsafe { simd_reduce_and(a.as_i16x16()) } } @@ -4867,7 +5265,8 @@ pub fn _mm256_reduce_and_epi16(a: __m256i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_reduce_and_epi16(k: __mmask16, a: __m256i) -> i16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_reduce_and_epi16(k: __mmask16, a: __m256i) -> i16 { unsafe { simd_reduce_and(simd_select_bitmask( k, @@ -4883,7 +5282,8 @@ pub fn _mm256_mask_reduce_and_epi16(k: __mmask16, a: __m256i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_reduce_and_epi16(a: __m128i) -> i16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_reduce_and_epi16(a: __m128i) -> i16 { unsafe { simd_reduce_and(a.as_i16x8()) } } @@ -4893,7 +5293,8 @@ pub fn _mm_reduce_and_epi16(a: __m128i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_reduce_and_epi16(k: __mmask8, a: __m128i) -> i16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_reduce_and_epi16(k: __mmask8, a: __m128i) -> i16 { unsafe { simd_reduce_and(simd_select_bitmask( k, @@ -4909,7 +5310,8 @@ pub fn _mm_mask_reduce_and_epi16(k: __mmask8, a: __m128i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_reduce_and_epi8(a: __m256i) -> i8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_reduce_and_epi8(a: __m256i) -> i8 { unsafe { simd_reduce_and(a.as_i8x32()) } } @@ -4919,7 +5321,8 @@ pub fn _mm256_reduce_and_epi8(a: __m256i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_reduce_and_epi8(k: __mmask32, a: __m256i) -> i8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_reduce_and_epi8(k: __mmask32, a: __m256i) -> i8 { unsafe { simd_reduce_and(simd_select_bitmask( k, @@ -4935,7 +5338,8 @@ pub fn _mm256_mask_reduce_and_epi8(k: __mmask32, a: __m256i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_reduce_and_epi8(a: __m128i) -> i8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_reduce_and_epi8(a: __m128i) -> i8 { unsafe { simd_reduce_and(a.as_i8x16()) } } @@ -4945,7 +5349,8 @@ pub fn _mm_reduce_and_epi8(a: __m128i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_reduce_and_epi8(k: __mmask16, a: __m128i) -> i8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_reduce_and_epi8(k: __mmask16, a: __m128i) -> i8 { unsafe { simd_reduce_and(simd_select_bitmask( k, @@ -4961,7 
+5366,8 @@ pub fn _mm_mask_reduce_and_epi8(k: __mmask16, a: __m128i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_reduce_max_epi16(a: __m256i) -> i16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_reduce_max_epi16(a: __m256i) -> i16 { unsafe { simd_reduce_max(a.as_i16x16()) } } @@ -4971,7 +5377,8 @@ pub fn _mm256_reduce_max_epi16(a: __m256i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_reduce_max_epi16(k: __mmask16, a: __m256i) -> i16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_reduce_max_epi16(k: __mmask16, a: __m256i) -> i16 { unsafe { simd_reduce_max(simd_select_bitmask(k, a.as_i16x16(), i16x16::splat(-32768))) } } @@ -4981,7 +5388,8 @@ pub fn _mm256_mask_reduce_max_epi16(k: __mmask16, a: __m256i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_reduce_max_epi16(a: __m128i) -> i16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_reduce_max_epi16(a: __m128i) -> i16 { unsafe { simd_reduce_max(a.as_i16x8()) } } @@ -4991,7 +5399,8 @@ pub fn _mm_reduce_max_epi16(a: __m128i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_reduce_max_epi16(k: __mmask8, a: __m128i) -> i16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_reduce_max_epi16(k: __mmask8, a: __m128i) -> i16 { unsafe { simd_reduce_max(simd_select_bitmask(k, a.as_i16x8(), i16x8::splat(-32768))) } } @@ -5001,7 +5410,8 @@ pub fn _mm_mask_reduce_max_epi16(k: __mmask8, a: __m128i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_reduce_max_epi8(a: __m256i) -> i8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_reduce_max_epi8(a: __m256i) -> i8 { unsafe { simd_reduce_max(a.as_i8x32()) } } @@ -5011,7 +5421,8 @@ pub fn _mm256_reduce_max_epi8(a: __m256i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_reduce_max_epi8(k: __mmask32, a: __m256i) -> i8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_reduce_max_epi8(k: __mmask32, a: __m256i) -> i8 { unsafe { simd_reduce_max(simd_select_bitmask(k, a.as_i8x32(), i8x32::splat(-128))) } } @@ -5021,7 +5432,8 @@ pub fn _mm256_mask_reduce_max_epi8(k: __mmask32, a: __m256i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_reduce_max_epi8(a: __m128i) -> i8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_reduce_max_epi8(a: __m128i) -> i8 { unsafe { simd_reduce_max(a.as_i8x16()) } } @@ -5031,7 +5443,8 @@ pub fn _mm_reduce_max_epi8(a: __m128i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_reduce_max_epi8(k: __mmask16, a: __m128i) -> i8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn 
_mm_mask_reduce_max_epi8(k: __mmask16, a: __m128i) -> i8 { unsafe { simd_reduce_max(simd_select_bitmask(k, a.as_i8x16(), i8x16::splat(-128))) } } @@ -5041,7 +5454,8 @@ pub fn _mm_mask_reduce_max_epi8(k: __mmask16, a: __m128i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_reduce_max_epu16(a: __m256i) -> u16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_reduce_max_epu16(a: __m256i) -> u16 { unsafe { simd_reduce_max(a.as_u16x16()) } } @@ -5051,7 +5465,8 @@ pub fn _mm256_reduce_max_epu16(a: __m256i) -> u16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_reduce_max_epu16(k: __mmask16, a: __m256i) -> u16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_reduce_max_epu16(k: __mmask16, a: __m256i) -> u16 { unsafe { simd_reduce_max(simd_select_bitmask(k, a.as_u16x16(), u16x16::ZERO)) } } @@ -5061,7 +5476,8 @@ pub fn _mm256_mask_reduce_max_epu16(k: __mmask16, a: __m256i) -> u16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_reduce_max_epu16(a: __m128i) -> u16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_reduce_max_epu16(a: __m128i) -> u16 { unsafe { simd_reduce_max(a.as_u16x8()) } } @@ -5071,7 +5487,8 @@ pub fn _mm_reduce_max_epu16(a: __m128i) -> u16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_reduce_max_epu16(k: __mmask8, a: __m128i) -> u16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_reduce_max_epu16(k: __mmask8, a: __m128i) -> u16 { unsafe { simd_reduce_max(simd_select_bitmask(k, a.as_u16x8(), u16x8::ZERO)) } } @@ -5081,7 +5498,8 @@ pub fn _mm_mask_reduce_max_epu16(k: __mmask8, a: __m128i) -> u16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_reduce_max_epu8(a: __m256i) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_reduce_max_epu8(a: __m256i) -> u8 { unsafe { simd_reduce_max(a.as_u8x32()) } } @@ -5091,7 +5509,8 @@ pub fn _mm256_reduce_max_epu8(a: __m256i) -> u8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_reduce_max_epu8(k: __mmask32, a: __m256i) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_reduce_max_epu8(k: __mmask32, a: __m256i) -> u8 { unsafe { simd_reduce_max(simd_select_bitmask(k, a.as_u8x32(), u8x32::ZERO)) } } @@ -5101,7 +5520,8 @@ pub fn _mm256_mask_reduce_max_epu8(k: __mmask32, a: __m256i) -> u8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_reduce_max_epu8(a: __m128i) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_reduce_max_epu8(a: __m128i) -> u8 { unsafe { simd_reduce_max(a.as_u8x16()) } } @@ -5111,7 +5531,8 @@ pub fn _mm_reduce_max_epu8(a: __m128i) -> u8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn 
_mm_mask_reduce_max_epu8(k: __mmask16, a: __m128i) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_reduce_max_epu8(k: __mmask16, a: __m128i) -> u8 { unsafe { simd_reduce_max(simd_select_bitmask(k, a.as_u8x16(), u8x16::ZERO)) } } @@ -5121,7 +5542,8 @@ pub fn _mm_mask_reduce_max_epu8(k: __mmask16, a: __m128i) -> u8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_reduce_min_epi16(a: __m256i) -> i16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_reduce_min_epi16(a: __m256i) -> i16 { unsafe { simd_reduce_min(a.as_i16x16()) } } @@ -5131,7 +5553,8 @@ pub fn _mm256_reduce_min_epi16(a: __m256i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_reduce_min_epi16(k: __mmask16, a: __m256i) -> i16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_reduce_min_epi16(k: __mmask16, a: __m256i) -> i16 { unsafe { simd_reduce_min(simd_select_bitmask(k, a.as_i16x16(), i16x16::splat(0x7fff))) } } @@ -5141,7 +5564,8 @@ pub fn _mm256_mask_reduce_min_epi16(k: __mmask16, a: __m256i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_reduce_min_epi16(a: __m128i) -> i16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_reduce_min_epi16(a: __m128i) -> i16 { unsafe { simd_reduce_min(a.as_i16x8()) } } @@ -5151,7 +5575,8 @@ pub fn _mm_reduce_min_epi16(a: __m128i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_reduce_min_epi16(k: __mmask8, a: __m128i) -> i16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_reduce_min_epi16(k: __mmask8, a: __m128i) -> i16 { unsafe { simd_reduce_min(simd_select_bitmask(k, a.as_i16x8(), i16x8::splat(0x7fff))) } } @@ -5161,7 +5586,8 @@ pub fn _mm_mask_reduce_min_epi16(k: __mmask8, a: __m128i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_reduce_min_epi8(a: __m256i) -> i8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_reduce_min_epi8(a: __m256i) -> i8 { unsafe { simd_reduce_min(a.as_i8x32()) } } @@ -5171,7 +5597,8 @@ pub fn _mm256_reduce_min_epi8(a: __m256i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_reduce_min_epi8(k: __mmask32, a: __m256i) -> i8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_reduce_min_epi8(k: __mmask32, a: __m256i) -> i8 { unsafe { simd_reduce_min(simd_select_bitmask(k, a.as_i8x32(), i8x32::splat(0x7f))) } } @@ -5181,7 +5608,8 @@ pub fn _mm256_mask_reduce_min_epi8(k: __mmask32, a: __m256i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_reduce_min_epi8(a: __m128i) -> i8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_reduce_min_epi8(a: __m128i) -> i8 { unsafe { simd_reduce_min(a.as_i8x16()) } } @@ -5191,7 +5619,8 @@ pub fn _mm_reduce_min_epi8(a: 
__m128i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_reduce_min_epi8(k: __mmask16, a: __m128i) -> i8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_reduce_min_epi8(k: __mmask16, a: __m128i) -> i8 { unsafe { simd_reduce_min(simd_select_bitmask(k, a.as_i8x16(), i8x16::splat(0x7f))) } } @@ -5201,7 +5630,8 @@ pub fn _mm_mask_reduce_min_epi8(k: __mmask16, a: __m128i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_reduce_min_epu16(a: __m256i) -> u16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_reduce_min_epu16(a: __m256i) -> u16 { unsafe { simd_reduce_min(a.as_u16x16()) } } @@ -5211,7 +5641,8 @@ pub fn _mm256_reduce_min_epu16(a: __m256i) -> u16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_reduce_min_epu16(k: __mmask16, a: __m256i) -> u16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_reduce_min_epu16(k: __mmask16, a: __m256i) -> u16 { unsafe { simd_reduce_min(simd_select_bitmask(k, a.as_u16x16(), u16x16::splat(0xffff))) } } @@ -5221,7 +5652,8 @@ pub fn _mm256_mask_reduce_min_epu16(k: __mmask16, a: __m256i) -> u16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_reduce_min_epu16(a: __m128i) -> u16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_reduce_min_epu16(a: __m128i) -> u16 { unsafe { simd_reduce_min(a.as_u16x8()) } } @@ -5231,7 +5663,8 @@ pub fn _mm_reduce_min_epu16(a: __m128i) -> u16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_reduce_min_epu16(k: __mmask8, a: __m128i) -> u16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_reduce_min_epu16(k: __mmask8, a: __m128i) -> u16 { unsafe { simd_reduce_min(simd_select_bitmask(k, a.as_u16x8(), u16x8::splat(0xffff))) } } @@ -5241,7 +5674,8 @@ pub fn _mm_mask_reduce_min_epu16(k: __mmask8, a: __m128i) -> u16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_reduce_min_epu8(a: __m256i) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_reduce_min_epu8(a: __m256i) -> u8 { unsafe { simd_reduce_min(a.as_u8x32()) } } @@ -5251,7 +5685,8 @@ pub fn _mm256_reduce_min_epu8(a: __m256i) -> u8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_reduce_min_epu8(k: __mmask32, a: __m256i) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_reduce_min_epu8(k: __mmask32, a: __m256i) -> u8 { unsafe { simd_reduce_min(simd_select_bitmask(k, a.as_u8x32(), u8x32::splat(0xff))) } } @@ -5261,7 +5696,8 @@ pub fn _mm256_mask_reduce_min_epu8(k: __mmask32, a: __m256i) -> u8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_reduce_min_epu8(a: __m128i) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] 
+pub const fn _mm_reduce_min_epu8(a: __m128i) -> u8 { unsafe { simd_reduce_min(a.as_u8x16()) } } @@ -5271,7 +5707,8 @@ pub fn _mm_reduce_min_epu8(a: __m128i) -> u8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_reduce_min_epu8(k: __mmask16, a: __m128i) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_reduce_min_epu8(k: __mmask16, a: __m128i) -> u8 { unsafe { simd_reduce_min(simd_select_bitmask(k, a.as_u8x16(), u8x16::splat(0xff))) } } @@ -5281,8 +5718,9 @@ pub fn _mm_mask_reduce_min_epu8(k: __mmask16, a: __m128i) -> u8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_reduce_mul_epi16(a: __m256i) -> i16 { - unsafe { simd_reduce_mul_unordered(a.as_i16x16()) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_reduce_mul_epi16(a: __m256i) -> i16 { + unsafe { simd_reduce_mul_ordered(a.as_i16x16(), 1) } } /// Reduce the packed 16-bit integers in a by multiplication using mask k. Returns the product of all active elements in a. @@ -5291,8 +5729,9 @@ pub fn _mm256_reduce_mul_epi16(a: __m256i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_reduce_mul_epi16(k: __mmask16, a: __m256i) -> i16 { - unsafe { simd_reduce_mul_unordered(simd_select_bitmask(k, a.as_i16x16(), i16x16::splat(1))) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_reduce_mul_epi16(k: __mmask16, a: __m256i) -> i16 { + unsafe { simd_reduce_mul_ordered(simd_select_bitmask(k, a.as_i16x16(), i16x16::splat(1)), 1) } } /// Reduce the packed 16-bit integers in a by multiplication. Returns the product of all elements in a. @@ -5301,8 +5740,9 @@ pub fn _mm256_mask_reduce_mul_epi16(k: __mmask16, a: __m256i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_reduce_mul_epi16(a: __m128i) -> i16 { - unsafe { simd_reduce_mul_unordered(a.as_i16x8()) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_reduce_mul_epi16(a: __m128i) -> i16 { + unsafe { simd_reduce_mul_ordered(a.as_i16x8(), 1) } } /// Reduce the packed 16-bit integers in a by multiplication using mask k. Returns the product of all active elements in a. @@ -5311,8 +5751,9 @@ pub fn _mm_reduce_mul_epi16(a: __m128i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_reduce_mul_epi16(k: __mmask8, a: __m128i) -> i16 { - unsafe { simd_reduce_mul_unordered(simd_select_bitmask(k, a.as_i16x8(), i16x8::splat(1))) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_reduce_mul_epi16(k: __mmask8, a: __m128i) -> i16 { + unsafe { simd_reduce_mul_ordered(simd_select_bitmask(k, a.as_i16x8(), i16x8::splat(1)), 1) } } /// Reduce the packed 8-bit integers in a by multiplication. Returns the product of all elements in a. 
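Each masked reduction pairs the operation with its identity element, so masked-out lanes drop out of the result: ZERO for addition and OR, all-ones for AND, splat(1) for multiplication, the type's minimum for signed max and its maximum for signed min, with 0 and the all-ones pattern playing those roles for the unsigned variants. Summarised as plain constants (illustrative only, for the 16-bit signed case):

// Identity elements used to neutralise inactive lanes, per reduction.
const ADD_IDENTITY: i16 = 0;        // x + 0 == x
const OR_IDENTITY: i16 = 0;         // x | 0 == x
const AND_IDENTITY: i16 = !0;       // x & !0 == x
const MUL_IDENTITY: i16 = 1;        // x * 1 == x
const MAX_IDENTITY: i16 = i16::MIN; // max(x, -32768) == x
const MIN_IDENTITY: i16 = i16::MAX; // min(x, 0x7fff) == x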
@@ -5321,8 +5762,9 @@ pub fn _mm_mask_reduce_mul_epi16(k: __mmask8, a: __m128i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_reduce_mul_epi8(a: __m256i) -> i8 { - unsafe { simd_reduce_mul_unordered(a.as_i8x32()) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_reduce_mul_epi8(a: __m256i) -> i8 { + unsafe { simd_reduce_mul_ordered(a.as_i8x32(), 1) } } /// Reduce the packed 8-bit integers in a by multiplication using mask k. Returns the product of all active elements in a. @@ -5331,8 +5773,9 @@ pub fn _mm256_reduce_mul_epi8(a: __m256i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_reduce_mul_epi8(k: __mmask32, a: __m256i) -> i8 { - unsafe { simd_reduce_mul_unordered(simd_select_bitmask(k, a.as_i8x32(), i8x32::splat(1))) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_reduce_mul_epi8(k: __mmask32, a: __m256i) -> i8 { + unsafe { simd_reduce_mul_ordered(simd_select_bitmask(k, a.as_i8x32(), i8x32::splat(1)), 1) } } /// Reduce the packed 8-bit integers in a by multiplication. Returns the product of all elements in a. @@ -5341,8 +5784,9 @@ pub fn _mm256_mask_reduce_mul_epi8(k: __mmask32, a: __m256i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_reduce_mul_epi8(a: __m128i) -> i8 { - unsafe { simd_reduce_mul_unordered(a.as_i8x16()) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_reduce_mul_epi8(a: __m128i) -> i8 { + unsafe { simd_reduce_mul_ordered(a.as_i8x16(), 1) } } /// Reduce the packed 8-bit integers in a by multiplication using mask k. Returns the product of all active elements in a. @@ -5351,8 +5795,9 @@ pub fn _mm_reduce_mul_epi8(a: __m128i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_reduce_mul_epi8(k: __mmask16, a: __m128i) -> i8 { - unsafe { simd_reduce_mul_unordered(simd_select_bitmask(k, a.as_i8x16(), i8x16::splat(1))) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_reduce_mul_epi8(k: __mmask16, a: __m128i) -> i8 { + unsafe { simd_reduce_mul_ordered(simd_select_bitmask(k, a.as_i8x16(), i8x16::splat(1)), 1) } } /// Reduce the packed 16-bit integers in a by bitwise OR. Returns the bitwise OR of all elements in a. 
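Putting the two steps together for the multiply reductions above: inactive lanes become 1 and the remaining lanes are multiplied in order, so a fully masked-off vector reduces to 1. A scalar sketch with a worked example (masked_product is an illustrative name, not an intrinsic):

// Scalar model of a masked, ordered product over 8 lanes of i16.
const fn masked_product(mask: u8, lanes: [i16; 8]) -> i16 {
    let mut acc: i16 = 1;
    let mut i = 0;
    while i < 8 {
        let lane = if (mask >> i) & 1 == 1 { lanes[i] } else { 1 };
        acc = acc.wrapping_mul(lane);
        i += 1;
    }
    acc
}

// Only lanes 0 and 2 are active: 3 * 5 == 15.
const P: i16 = masked_product(0b0000_0101, [3, 100, 5, 100, 100, 100, 100, 100]);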
@@ -5361,7 +5806,8 @@ pub fn _mm_mask_reduce_mul_epi8(k: __mmask16, a: __m128i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_reduce_or_epi16(a: __m256i) -> i16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_reduce_or_epi16(a: __m256i) -> i16 { unsafe { simd_reduce_or(a.as_i16x16()) } } @@ -5371,7 +5817,8 @@ pub fn _mm256_reduce_or_epi16(a: __m256i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_reduce_or_epi16(k: __mmask16, a: __m256i) -> i16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_reduce_or_epi16(k: __mmask16, a: __m256i) -> i16 { unsafe { simd_reduce_or(simd_select_bitmask(k, a.as_i16x16(), i16x16::ZERO)) } } @@ -5381,7 +5828,8 @@ pub fn _mm256_mask_reduce_or_epi16(k: __mmask16, a: __m256i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_reduce_or_epi16(a: __m128i) -> i16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_reduce_or_epi16(a: __m128i) -> i16 { unsafe { simd_reduce_or(a.as_i16x8()) } } @@ -5391,7 +5839,8 @@ pub fn _mm_reduce_or_epi16(a: __m128i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_reduce_or_epi16(k: __mmask8, a: __m128i) -> i16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_reduce_or_epi16(k: __mmask8, a: __m128i) -> i16 { unsafe { simd_reduce_or(simd_select_bitmask(k, a.as_i16x8(), i16x8::ZERO)) } } @@ -5401,7 +5850,8 @@ pub fn _mm_mask_reduce_or_epi16(k: __mmask8, a: __m128i) -> i16 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_reduce_or_epi8(a: __m256i) -> i8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_reduce_or_epi8(a: __m256i) -> i8 { unsafe { simd_reduce_or(a.as_i8x32()) } } @@ -5411,7 +5861,8 @@ pub fn _mm256_reduce_or_epi8(a: __m256i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_reduce_or_epi8(k: __mmask32, a: __m256i) -> i8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_reduce_or_epi8(k: __mmask32, a: __m256i) -> i8 { unsafe { simd_reduce_or(simd_select_bitmask(k, a.as_i8x32(), i8x32::ZERO)) } } @@ -5421,7 +5872,8 @@ pub fn _mm256_mask_reduce_or_epi8(k: __mmask32, a: __m256i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_reduce_or_epi8(a: __m128i) -> i8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_reduce_or_epi8(a: __m128i) -> i8 { unsafe { simd_reduce_or(a.as_i8x16()) } } @@ -5431,7 +5883,8 @@ pub fn _mm_reduce_or_epi8(a: __m128i) -> i8 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_reduce_or_epi8(k: __mmask16, a: __m128i) -> i8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_reduce_or_epi8(k: __mmask16, a: __m128i) -> 
i8 { unsafe { simd_reduce_or(simd_select_bitmask(k, a.as_i8x16(), i8x16::ZERO)) } } @@ -5442,7 +5895,8 @@ pub fn _mm_mask_reduce_or_epi8(k: __mmask16, a: __m128i) -> i8 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu16 -pub unsafe fn _mm512_loadu_epi16(mem_addr: *const i16) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_loadu_epi16(mem_addr: *const i16) -> __m512i { ptr::read_unaligned(mem_addr as *const __m512i) } @@ -5453,7 +5907,8 @@ pub unsafe fn _mm512_loadu_epi16(mem_addr: *const i16) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu16 -pub unsafe fn _mm256_loadu_epi16(mem_addr: *const i16) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_loadu_epi16(mem_addr: *const i16) -> __m256i { ptr::read_unaligned(mem_addr as *const __m256i) } @@ -5464,7 +5919,8 @@ pub unsafe fn _mm256_loadu_epi16(mem_addr: *const i16) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu16 -pub unsafe fn _mm_loadu_epi16(mem_addr: *const i16) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_loadu_epi16(mem_addr: *const i16) -> __m128i { ptr::read_unaligned(mem_addr as *const __m128i) } @@ -5475,7 +5931,8 @@ pub unsafe fn _mm_loadu_epi16(mem_addr: *const i16) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu8 -pub unsafe fn _mm512_loadu_epi8(mem_addr: *const i8) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_loadu_epi8(mem_addr: *const i8) -> __m512i { ptr::read_unaligned(mem_addr as *const __m512i) } @@ -5486,7 +5943,8 @@ pub unsafe fn _mm512_loadu_epi8(mem_addr: *const i8) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu8 -pub unsafe fn _mm256_loadu_epi8(mem_addr: *const i8) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_loadu_epi8(mem_addr: *const i8) -> __m256i { ptr::read_unaligned(mem_addr as *const __m256i) } @@ -5497,7 +5955,8 @@ pub unsafe fn _mm256_loadu_epi8(mem_addr: *const i8) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu8 -pub unsafe fn _mm_loadu_epi8(mem_addr: *const i8) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_loadu_epi8(mem_addr: *const i8) -> __m128i { ptr::read_unaligned(mem_addr as *const __m128i) } @@ -5508,7 +5967,8 @@ pub unsafe fn _mm_loadu_epi8(mem_addr: *const i8) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu16 -pub unsafe fn _mm512_storeu_epi16(mem_addr: *mut i16, a: __m512i) { +#[rustc_const_unstable(feature = 
"stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_storeu_epi16(mem_addr: *mut i16, a: __m512i) { ptr::write_unaligned(mem_addr as *mut __m512i, a); } @@ -5519,7 +5979,8 @@ pub unsafe fn _mm512_storeu_epi16(mem_addr: *mut i16, a: __m512i) { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu16 -pub unsafe fn _mm256_storeu_epi16(mem_addr: *mut i16, a: __m256i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_storeu_epi16(mem_addr: *mut i16, a: __m256i) { ptr::write_unaligned(mem_addr as *mut __m256i, a); } @@ -5530,7 +5991,8 @@ pub unsafe fn _mm256_storeu_epi16(mem_addr: *mut i16, a: __m256i) { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu16 -pub unsafe fn _mm_storeu_epi16(mem_addr: *mut i16, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_storeu_epi16(mem_addr: *mut i16, a: __m128i) { ptr::write_unaligned(mem_addr as *mut __m128i, a); } @@ -5541,7 +6003,8 @@ pub unsafe fn _mm_storeu_epi16(mem_addr: *mut i16, a: __m128i) { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu8 -pub unsafe fn _mm512_storeu_epi8(mem_addr: *mut i8, a: __m512i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_storeu_epi8(mem_addr: *mut i8, a: __m512i) { ptr::write_unaligned(mem_addr as *mut __m512i, a); } @@ -5552,7 +6015,8 @@ pub unsafe fn _mm512_storeu_epi8(mem_addr: *mut i8, a: __m512i) { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu8 -pub unsafe fn _mm256_storeu_epi8(mem_addr: *mut i8, a: __m256i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_storeu_epi8(mem_addr: *mut i8, a: __m256i) { ptr::write_unaligned(mem_addr as *mut __m256i, a); } @@ -5563,7 +6027,8 @@ pub unsafe fn _mm256_storeu_epi8(mem_addr: *mut i8, a: __m256i) { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu8 -pub unsafe fn _mm_storeu_epi8(mem_addr: *mut i8, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_storeu_epi8(mem_addr: *mut i8, a: __m128i) { ptr::write_unaligned(mem_addr as *mut __m128i, a); } @@ -5576,7 +6041,12 @@ pub unsafe fn _mm_storeu_epi8(mem_addr: *mut i8, a: __m128i) { #[target_feature(enable = "avx512bw")] #[cfg_attr(test, assert_instr(vmovdqu16))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_loadu_epi16(src: __m512i, k: __mmask32, mem_addr: *const i16) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_loadu_epi16( + src: __m512i, + k: __mmask32, + mem_addr: *const i16, +) -> __m512i { let mask = simd_select_bitmask(k, i16x32::splat(!0), i16x32::ZERO); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, src.as_i16x32()).as_m512i() } @@ -5590,7 +6060,8 @@ pub unsafe fn _mm512_mask_loadu_epi16(src: __m512i, k: __mmask32, mem_addr: *con 
#[target_feature(enable = "avx512bw")] #[cfg_attr(test, assert_instr(vmovdqu16))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_maskz_loadu_epi16(k: __mmask32, mem_addr: *const i16) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_maskz_loadu_epi16(k: __mmask32, mem_addr: *const i16) -> __m512i { _mm512_mask_loadu_epi16(_mm512_setzero_si512(), k, mem_addr) } @@ -5603,7 +6074,12 @@ pub unsafe fn _mm512_maskz_loadu_epi16(k: __mmask32, mem_addr: *const i16) -> __ #[target_feature(enable = "avx512bw")] #[cfg_attr(test, assert_instr(vmovdqu8))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_loadu_epi8(src: __m512i, k: __mmask64, mem_addr: *const i8) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_loadu_epi8( + src: __m512i, + k: __mmask64, + mem_addr: *const i8, +) -> __m512i { let mask = simd_select_bitmask(k, i8x64::splat(!0), i8x64::ZERO); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, src.as_i8x64()).as_m512i() } @@ -5617,7 +6093,8 @@ pub unsafe fn _mm512_mask_loadu_epi8(src: __m512i, k: __mmask64, mem_addr: *cons #[target_feature(enable = "avx512bw")] #[cfg_attr(test, assert_instr(vmovdqu8))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_maskz_loadu_epi8(k: __mmask64, mem_addr: *const i8) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_maskz_loadu_epi8(k: __mmask64, mem_addr: *const i8) -> __m512i { _mm512_mask_loadu_epi8(_mm512_setzero_si512(), k, mem_addr) } @@ -5630,7 +6107,12 @@ pub unsafe fn _mm512_maskz_loadu_epi8(k: __mmask64, mem_addr: *const i8) -> __m5 #[target_feature(enable = "avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu16))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_loadu_epi16(src: __m256i, k: __mmask16, mem_addr: *const i16) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_loadu_epi16( + src: __m256i, + k: __mmask16, + mem_addr: *const i16, +) -> __m256i { let mask = simd_select_bitmask(k, i16x16::splat(!0), i16x16::ZERO); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, src.as_i16x16()).as_m256i() } @@ -5644,7 +6126,8 @@ pub unsafe fn _mm256_mask_loadu_epi16(src: __m256i, k: __mmask16, mem_addr: *con #[target_feature(enable = "avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu16))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_maskz_loadu_epi16(k: __mmask16, mem_addr: *const i16) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_maskz_loadu_epi16(k: __mmask16, mem_addr: *const i16) -> __m256i { _mm256_mask_loadu_epi16(_mm256_setzero_si256(), k, mem_addr) } @@ -5657,7 +6140,12 @@ pub unsafe fn _mm256_maskz_loadu_epi16(k: __mmask16, mem_addr: *const i16) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu8))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_loadu_epi8(src: __m256i, k: __mmask32, mem_addr: *const i8) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_loadu_epi8( + src: __m256i, + k: __mmask32, + mem_addr: *const i8, +) -> __m256i { let mask = simd_select_bitmask(k, 
i8x32::splat(!0), i8x32::ZERO); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, src.as_i8x32()).as_m256i() } @@ -5671,7 +6159,8 @@ pub unsafe fn _mm256_mask_loadu_epi8(src: __m256i, k: __mmask32, mem_addr: *cons #[target_feature(enable = "avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu8))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_maskz_loadu_epi8(k: __mmask32, mem_addr: *const i8) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_maskz_loadu_epi8(k: __mmask32, mem_addr: *const i8) -> __m256i { _mm256_mask_loadu_epi8(_mm256_setzero_si256(), k, mem_addr) } @@ -5684,7 +6173,12 @@ pub unsafe fn _mm256_maskz_loadu_epi8(k: __mmask32, mem_addr: *const i8) -> __m2 #[target_feature(enable = "avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu16))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_loadu_epi16(src: __m128i, k: __mmask8, mem_addr: *const i16) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_loadu_epi16( + src: __m128i, + k: __mmask8, + mem_addr: *const i16, +) -> __m128i { let mask = simd_select_bitmask(k, i16x8::splat(!0), i16x8::ZERO); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, src.as_i16x8()).as_m128i() } @@ -5698,7 +6192,8 @@ pub unsafe fn _mm_mask_loadu_epi16(src: __m128i, k: __mmask8, mem_addr: *const i #[target_feature(enable = "avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu16))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_maskz_loadu_epi16(k: __mmask8, mem_addr: *const i16) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_maskz_loadu_epi16(k: __mmask8, mem_addr: *const i16) -> __m128i { _mm_mask_loadu_epi16(_mm_setzero_si128(), k, mem_addr) } @@ -5711,7 +6206,12 @@ pub unsafe fn _mm_maskz_loadu_epi16(k: __mmask8, mem_addr: *const i16) -> __m128 #[target_feature(enable = "avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu8))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_loadu_epi8(src: __m128i, k: __mmask16, mem_addr: *const i8) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_loadu_epi8( + src: __m128i, + k: __mmask16, + mem_addr: *const i8, +) -> __m128i { let mask = simd_select_bitmask(k, i8x16::splat(!0), i8x16::ZERO); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, src.as_i8x16()).as_m128i() } @@ -5725,7 +6225,8 @@ pub unsafe fn _mm_mask_loadu_epi8(src: __m128i, k: __mmask16, mem_addr: *const i #[target_feature(enable = "avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu8))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_maskz_loadu_epi8(k: __mmask16, mem_addr: *const i8) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_maskz_loadu_epi8(k: __mmask16, mem_addr: *const i8) -> __m128i { _mm_mask_loadu_epi8(_mm_setzero_si128(), k, mem_addr) } @@ -5737,7 +6238,8 @@ pub unsafe fn _mm_maskz_loadu_epi8(k: __mmask16, mem_addr: *const i8) -> __m128i #[target_feature(enable = "avx512bw")] #[cfg_attr(test, assert_instr(vmovdqu16))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask32, a: __m512i) { +#[rustc_const_unstable(feature = 
"stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask32, a: __m512i) { let mask = simd_select_bitmask(mask, i16x32::splat(!0), i16x32::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_i16x32()); } @@ -5750,7 +6252,8 @@ pub unsafe fn _mm512_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask32, a: _ #[target_feature(enable = "avx512bw")] #[cfg_attr(test, assert_instr(vmovdqu8))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask64, a: __m512i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask64, a: __m512i) { let mask = simd_select_bitmask(mask, i8x64::splat(!0), i8x64::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_i8x64()); } @@ -5763,7 +6266,8 @@ pub unsafe fn _mm512_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask64, a: __m #[target_feature(enable = "avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu16))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask16, a: __m256i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask16, a: __m256i) { let mask = simd_select_bitmask(mask, i16x16::splat(!0), i16x16::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_i16x16()); } @@ -5776,7 +6280,8 @@ pub unsafe fn _mm256_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask16, a: _ #[target_feature(enable = "avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu8))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask32, a: __m256i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask32, a: __m256i) { let mask = simd_select_bitmask(mask, i8x32::splat(!0), i8x32::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_i8x32()); } @@ -5789,7 +6294,8 @@ pub unsafe fn _mm256_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask32, a: __m #[target_feature(enable = "avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu16))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask8, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask8, a: __m128i) { let mask = simd_select_bitmask(mask, i16x8::splat(!0), i16x8::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_i16x8()); } @@ -5802,7 +6308,8 @@ pub unsafe fn _mm_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask8, a: __m12 #[target_feature(enable = "avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu8))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask16, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask16, a: __m128i) { let mask = simd_select_bitmask(mask, i8x16::splat(!0), i8x16::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_i8x16()); } @@ -5814,7 +6321,8 @@ 
pub unsafe fn _mm_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask16, a: __m128 #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaddwd))] -pub fn _mm512_madd_epi16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_madd_epi16(a: __m512i, b: __m512i) -> __m512i { unsafe { let r: i32x32 = simd_mul(simd_cast(a.as_i16x32()), simd_cast(b.as_i16x32())); let even: i32x16 = simd_shuffle!( @@ -5838,7 +6346,8 @@ pub fn _mm512_madd_epi16(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaddwd))] -pub fn _mm512_mask_madd_epi16(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_madd_epi16(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let madd = _mm512_madd_epi16(a, b).as_i32x16(); transmute(simd_select_bitmask(k, madd, src.as_i32x16())) @@ -5852,7 +6361,8 @@ pub fn _mm512_mask_madd_epi16(src: __m512i, k: __mmask16, a: __m512i, b: __m512i #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaddwd))] -pub fn _mm512_maskz_madd_epi16(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_madd_epi16(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let madd = _mm512_madd_epi16(a, b).as_i32x16(); transmute(simd_select_bitmask(k, madd, i32x16::ZERO)) @@ -5866,7 +6376,8 @@ pub fn _mm512_maskz_madd_epi16(k: __mmask16, a: __m512i, b: __m512i) -> __m512i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaddwd))] -pub fn _mm256_mask_madd_epi16(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_madd_epi16(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let madd = _mm256_madd_epi16(a, b).as_i32x8(); transmute(simd_select_bitmask(k, madd, src.as_i32x8())) @@ -5880,7 +6391,8 @@ pub fn _mm256_mask_madd_epi16(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaddwd))] -pub fn _mm256_maskz_madd_epi16(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_madd_epi16(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let madd = _mm256_madd_epi16(a, b).as_i32x8(); transmute(simd_select_bitmask(k, madd, i32x8::ZERO)) @@ -5894,7 +6406,8 @@ pub fn _mm256_maskz_madd_epi16(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaddwd))] -pub fn _mm_mask_madd_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_madd_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let madd = _mm_madd_epi16(a, 
b).as_i32x4(); transmute(simd_select_bitmask(k, madd, src.as_i32x4())) @@ -5908,7 +6421,8 @@ pub fn _mm_mask_madd_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaddwd))] -pub fn _mm_maskz_madd_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_madd_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let madd = _mm_madd_epi16(a, b).as_i32x4(); transmute(simd_select_bitmask(k, madd, i32x4::ZERO)) @@ -6397,7 +6911,8 @@ pub fn _mm_maskz_packus_epi16(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpavgw))] -pub fn _mm512_avg_epu16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_avg_epu16(a: __m512i, b: __m512i) -> __m512i { unsafe { let a = simd_cast::<_, u32x32>(a.as_u16x32()); let b = simd_cast::<_, u32x32>(b.as_u16x32()); @@ -6413,7 +6928,8 @@ pub fn _mm512_avg_epu16(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpavgw))] -pub fn _mm512_mask_avg_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_avg_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let avg = _mm512_avg_epu16(a, b).as_u16x32(); transmute(simd_select_bitmask(k, avg, src.as_u16x32())) @@ -6427,7 +6943,8 @@ pub fn _mm512_mask_avg_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpavgw))] -pub fn _mm512_maskz_avg_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_avg_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let avg = _mm512_avg_epu16(a, b).as_u16x32(); transmute(simd_select_bitmask(k, avg, u16x32::ZERO)) @@ -6441,7 +6958,8 @@ pub fn _mm512_maskz_avg_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpavgw))] -pub fn _mm256_mask_avg_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_avg_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let avg = _mm256_avg_epu16(a, b).as_u16x16(); transmute(simd_select_bitmask(k, avg, src.as_u16x16())) @@ -6455,7 +6973,8 @@ pub fn _mm256_mask_avg_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpavgw))] -pub fn _mm256_maskz_avg_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_avg_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let avg = 
_mm256_avg_epu16(a, b).as_u16x16(); transmute(simd_select_bitmask(k, avg, u16x16::ZERO)) @@ -6469,7 +6988,8 @@ pub fn _mm256_maskz_avg_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpavgw))] -pub fn _mm_mask_avg_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_avg_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let avg = _mm_avg_epu16(a, b).as_u16x8(); transmute(simd_select_bitmask(k, avg, src.as_u16x8())) @@ -6483,7 +7003,8 @@ pub fn _mm_mask_avg_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpavgw))] -pub fn _mm_maskz_avg_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_avg_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let avg = _mm_avg_epu16(a, b).as_u16x8(); transmute(simd_select_bitmask(k, avg, u16x8::ZERO)) @@ -6497,7 +7018,8 @@ pub fn _mm_maskz_avg_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpavgb))] -pub fn _mm512_avg_epu8(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_avg_epu8(a: __m512i, b: __m512i) -> __m512i { unsafe { let a = simd_cast::<_, u16x64>(a.as_u8x64()); let b = simd_cast::<_, u16x64>(b.as_u8x64()); @@ -6513,7 +7035,8 @@ pub fn _mm512_avg_epu8(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpavgb))] -pub fn _mm512_mask_avg_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_avg_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let avg = _mm512_avg_epu8(a, b).as_u8x64(); transmute(simd_select_bitmask(k, avg, src.as_u8x64())) @@ -6527,7 +7050,8 @@ pub fn _mm512_mask_avg_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpavgb))] -pub fn _mm512_maskz_avg_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_avg_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let avg = _mm512_avg_epu8(a, b).as_u8x64(); transmute(simd_select_bitmask(k, avg, u8x64::ZERO)) @@ -6541,7 +7065,8 @@ pub fn _mm512_maskz_avg_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpavgb))] -pub fn _mm256_mask_avg_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_avg_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let avg = 
_mm256_avg_epu8(a, b).as_u8x32(); transmute(simd_select_bitmask(k, avg, src.as_u8x32())) @@ -6555,7 +7080,8 @@ pub fn _mm256_mask_avg_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpavgb))] -pub fn _mm256_maskz_avg_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_avg_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let avg = _mm256_avg_epu8(a, b).as_u8x32(); transmute(simd_select_bitmask(k, avg, u8x32::ZERO)) @@ -6569,7 +7095,8 @@ pub fn _mm256_maskz_avg_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpavgb))] -pub fn _mm_mask_avg_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_avg_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let avg = _mm_avg_epu8(a, b).as_u8x16(); transmute(simd_select_bitmask(k, avg, src.as_u8x16())) @@ -6583,7 +7110,8 @@ pub fn _mm_mask_avg_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpavgb))] -pub fn _mm_maskz_avg_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_avg_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let avg = _mm_avg_epu8(a, b).as_u8x16(); transmute(simd_select_bitmask(k, avg, u8x16::ZERO)) @@ -6693,7 +7221,8 @@ pub fn _mm_maskz_sll_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_slli_epi16(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_slli_epi16(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 16 { @@ -6712,7 +7241,12 @@ pub fn _mm512_slli_epi16(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_slli_epi16(src: __m512i, k: __mmask32, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_slli_epi16( + src: __m512i, + k: __mmask32, + a: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = if IMM8 >= 16 { @@ -6732,7 +7266,8 @@ pub fn _mm512_mask_slli_epi16(src: __m512i, k: __mmask32, a: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_slli_epi16(k: __mmask32, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_slli_epi16(k: __mmask32, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 16 { @@ -6752,7 +7287,12 @@ pub fn _mm512_maskz_slli_epi16(k: __mmask32, a: __m512i) -> __m #[stable(feature = "stdarch_x86_avx512", 
since = "1.89")] #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_slli_epi16(src: __m256i, k: __mmask16, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_slli_epi16( + src: __m256i, + k: __mmask16, + a: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = if IMM8 >= 16 { @@ -6772,7 +7312,8 @@ pub fn _mm256_mask_slli_epi16(src: __m256i, k: __mmask16, a: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_slli_epi16(k: __mmask16, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_slli_epi16(k: __mmask16, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 16 { @@ -6792,7 +7333,12 @@ pub fn _mm256_maskz_slli_epi16(k: __mmask16, a: __m256i) -> __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm_mask_slli_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_slli_epi16( + src: __m128i, + k: __mmask8, + a: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = if IMM8 >= 16 { @@ -6812,7 +7358,8 @@ pub fn _mm_mask_slli_epi16(src: __m128i, k: __mmask8, a: __m128 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm_maskz_slli_epi16(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_slli_epi16(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 16 { @@ -6831,7 +7378,8 @@ pub fn _mm_maskz_slli_epi16(k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvw))] -pub fn _mm512_sllv_epi16(a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_sllv_epi16(a: __m512i, count: __m512i) -> __m512i { unsafe { let count = count.as_u16x32(); let no_overflow: u16x32 = simd_lt(count, u16x32::splat(u16::BITS as u16)); @@ -6847,7 +7395,13 @@ pub fn _mm512_sllv_epi16(a: __m512i, count: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvw))] -pub fn _mm512_mask_sllv_epi16(src: __m512i, k: __mmask32, a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_sllv_epi16( + src: __m512i, + k: __mmask32, + a: __m512i, + count: __m512i, +) -> __m512i { unsafe { let shf = _mm512_sllv_epi16(a, count).as_i16x32(); transmute(simd_select_bitmask(k, shf, src.as_i16x32())) @@ -6861,7 +7415,8 @@ pub fn _mm512_mask_sllv_epi16(src: __m512i, k: __mmask32, a: __m512i, count: __m #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvw))] -pub fn _mm512_maskz_sllv_epi16(k: __mmask32, a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = 
"stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_sllv_epi16(k: __mmask32, a: __m512i, count: __m512i) -> __m512i { unsafe { let shf = _mm512_sllv_epi16(a, count).as_i16x32(); transmute(simd_select_bitmask(k, shf, i16x32::ZERO)) @@ -6875,7 +7430,8 @@ pub fn _mm512_maskz_sllv_epi16(k: __mmask32, a: __m512i, count: __m512i) -> __m5 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvw))] -pub fn _mm256_sllv_epi16(a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_sllv_epi16(a: __m256i, count: __m256i) -> __m256i { unsafe { let count = count.as_u16x16(); let no_overflow: u16x16 = simd_lt(count, u16x16::splat(u16::BITS as u16)); @@ -6891,7 +7447,13 @@ pub fn _mm256_sllv_epi16(a: __m256i, count: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvw))] -pub fn _mm256_mask_sllv_epi16(src: __m256i, k: __mmask16, a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_sllv_epi16( + src: __m256i, + k: __mmask16, + a: __m256i, + count: __m256i, +) -> __m256i { unsafe { let shf = _mm256_sllv_epi16(a, count).as_i16x16(); transmute(simd_select_bitmask(k, shf, src.as_i16x16())) @@ -6905,7 +7467,8 @@ pub fn _mm256_mask_sllv_epi16(src: __m256i, k: __mmask16, a: __m256i, count: __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvw))] -pub fn _mm256_maskz_sllv_epi16(k: __mmask16, a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_sllv_epi16(k: __mmask16, a: __m256i, count: __m256i) -> __m256i { unsafe { let shf = _mm256_sllv_epi16(a, count).as_i16x16(); transmute(simd_select_bitmask(k, shf, i16x16::ZERO)) @@ -6919,7 +7482,8 @@ pub fn _mm256_maskz_sllv_epi16(k: __mmask16, a: __m256i, count: __m256i) -> __m2 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvw))] -pub fn _mm_sllv_epi16(a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_sllv_epi16(a: __m128i, count: __m128i) -> __m128i { unsafe { let count = count.as_u16x8(); let no_overflow: u16x8 = simd_lt(count, u16x8::splat(u16::BITS as u16)); @@ -6935,7 +7499,8 @@ pub fn _mm_sllv_epi16(a: __m128i, count: __m128i) -> __m128i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvw))] -pub fn _mm_mask_sllv_epi16(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_sllv_epi16(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { unsafe { let shf = _mm_sllv_epi16(a, count).as_i16x8(); transmute(simd_select_bitmask(k, shf, src.as_i16x8())) @@ -6949,7 +7514,8 @@ pub fn _mm_mask_sllv_epi16(src: __m128i, k: __mmask8, a: __m128i, count: __m128i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvw))] -pub fn _mm_maskz_sllv_epi16(k: 
__mmask8, a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_sllv_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { unsafe { let shf = _mm_sllv_epi16(a, count).as_i16x8(); transmute(simd_select_bitmask(k, shf, i16x8::ZERO)) @@ -7059,7 +7625,8 @@ pub fn _mm_maskz_srl_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_srli_epi16(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_srli_epi16(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 16 { @@ -7078,7 +7645,12 @@ pub fn _mm512_srli_epi16(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_srli_epi16(src: __m512i, k: __mmask32, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_srli_epi16( + src: __m512i, + k: __mmask32, + a: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = if IMM8 >= 16 { @@ -7098,7 +7670,8 @@ pub fn _mm512_mask_srli_epi16(src: __m512i, k: __mmask32, a: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_srli_epi16(k: __mmask32, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_srli_epi16(k: __mmask32, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); //imm8 should be u32, it seems the document to verify is incorrect @@ -7119,7 +7692,12 @@ pub fn _mm512_maskz_srli_epi16(k: __mmask32, a: __m512i) -> __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_srli_epi16(src: __m256i, k: __mmask16, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_srli_epi16( + src: __m256i, + k: __mmask16, + a: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm256_srli_epi16::(a); @@ -7135,7 +7713,8 @@ pub fn _mm256_mask_srli_epi16(src: __m256i, k: __mmask16, a: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_srli_epi16(k: __mmask16, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_srli_epi16(k: __mmask16, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm256_srli_epi16::(a); @@ -7151,7 +7730,12 @@ pub fn _mm256_maskz_srli_epi16(k: __mmask16, a: __m256i) -> __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm_mask_srli_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_srli_epi16( + src: __m128i, + k: __mmask8, + a: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = 
_mm_srli_epi16::(a); @@ -7167,7 +7751,8 @@ pub fn _mm_mask_srli_epi16(src: __m128i, k: __mmask8, a: __m128 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm_maskz_srli_epi16(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_srli_epi16(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm_srli_epi16::(a); @@ -7182,7 +7767,8 @@ pub fn _mm_maskz_srli_epi16(k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvw))] -pub fn _mm512_srlv_epi16(a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_srlv_epi16(a: __m512i, count: __m512i) -> __m512i { unsafe { let count = count.as_u16x32(); let no_overflow: u16x32 = simd_lt(count, u16x32::splat(u16::BITS as u16)); @@ -7198,7 +7784,13 @@ pub fn _mm512_srlv_epi16(a: __m512i, count: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvw))] -pub fn _mm512_mask_srlv_epi16(src: __m512i, k: __mmask32, a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_srlv_epi16( + src: __m512i, + k: __mmask32, + a: __m512i, + count: __m512i, +) -> __m512i { unsafe { let shf = _mm512_srlv_epi16(a, count).as_i16x32(); transmute(simd_select_bitmask(k, shf, src.as_i16x32())) @@ -7212,7 +7804,8 @@ pub fn _mm512_mask_srlv_epi16(src: __m512i, k: __mmask32, a: __m512i, count: __m #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvw))] -pub fn _mm512_maskz_srlv_epi16(k: __mmask32, a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_srlv_epi16(k: __mmask32, a: __m512i, count: __m512i) -> __m512i { unsafe { let shf = _mm512_srlv_epi16(a, count).as_i16x32(); transmute(simd_select_bitmask(k, shf, i16x32::ZERO)) @@ -7226,7 +7819,8 @@ pub fn _mm512_maskz_srlv_epi16(k: __mmask32, a: __m512i, count: __m512i) -> __m5 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvw))] -pub fn _mm256_srlv_epi16(a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_srlv_epi16(a: __m256i, count: __m256i) -> __m256i { unsafe { let count = count.as_u16x16(); let no_overflow: u16x16 = simd_lt(count, u16x16::splat(u16::BITS as u16)); @@ -7242,7 +7836,13 @@ pub fn _mm256_srlv_epi16(a: __m256i, count: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvw))] -pub fn _mm256_mask_srlv_epi16(src: __m256i, k: __mmask16, a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_srlv_epi16( + src: __m256i, + k: __mmask16, + a: __m256i, + count: __m256i, +) -> __m256i { unsafe { let shf = _mm256_srlv_epi16(a, count).as_i16x16(); transmute(simd_select_bitmask(k, shf, 
src.as_i16x16())) @@ -7256,7 +7856,8 @@ pub fn _mm256_mask_srlv_epi16(src: __m256i, k: __mmask16, a: __m256i, count: __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvw))] -pub fn _mm256_maskz_srlv_epi16(k: __mmask16, a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_srlv_epi16(k: __mmask16, a: __m256i, count: __m256i) -> __m256i { unsafe { let shf = _mm256_srlv_epi16(a, count).as_i16x16(); transmute(simd_select_bitmask(k, shf, i16x16::ZERO)) @@ -7270,7 +7871,8 @@ pub fn _mm256_maskz_srlv_epi16(k: __mmask16, a: __m256i, count: __m256i) -> __m2 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvw))] -pub fn _mm_srlv_epi16(a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_srlv_epi16(a: __m128i, count: __m128i) -> __m128i { unsafe { let count = count.as_u16x8(); let no_overflow: u16x8 = simd_lt(count, u16x8::splat(u16::BITS as u16)); @@ -7286,7 +7888,8 @@ pub fn _mm_srlv_epi16(a: __m128i, count: __m128i) -> __m128i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvw))] -pub fn _mm_mask_srlv_epi16(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_srlv_epi16(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { unsafe { let shf = _mm_srlv_epi16(a, count).as_i16x8(); transmute(simd_select_bitmask(k, shf, src.as_i16x8())) @@ -7300,7 +7903,8 @@ pub fn _mm_mask_srlv_epi16(src: __m128i, k: __mmask8, a: __m128i, count: __m128i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvw))] -pub fn _mm_maskz_srlv_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_srlv_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { unsafe { let shf = _mm_srlv_epi16(a, count).as_i16x8(); transmute(simd_select_bitmask(k, shf, i16x8::ZERO)) @@ -7410,7 +8014,8 @@ pub fn _mm_maskz_sra_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_srai_epi16(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_srai_epi16(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); transmute(simd_shr(a.as_i16x32(), i16x32::splat(IMM8.min(15) as i16))) @@ -7425,7 +8030,12 @@ pub fn _mm512_srai_epi16(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_srai_epi16(src: __m512i, k: __mmask32, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_srai_epi16( + src: __m512i, + k: __mmask32, + a: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = simd_shr(a.as_i16x32(), i16x32::splat(IMM8.min(15) as i16)); 
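Aside on the shift hunks above (this note and the sketch below are not part of the diff): the immediate and variable logical shifts (slli/srli and sllv/srlv) zero any lane whose shift count is 16 or more instead of producing an unspecified value, while the arithmetic shifts (srai) clamp the count to 15 so the sign bit is replicated. A minimal const sketch of that per-lane behaviour in plain Rust, with made-up helper names, purely for illustration:

// Illustration only: scalar models of the lane-wise behaviour of the const
// shift intrinsics above; these helpers are not part of the patch.
const fn logical_shift_left_lane(a: u16, count: u16) -> u16 {
    // slli/sllv (and srli/srlv with >>): counts of 16 or more yield zero.
    if count < u16::BITS as u16 { a << count } else { 0 }
}

const fn arithmetic_shift_right_lane(a: i16, imm8: u32) -> i16 {
    // srai: counts larger than 15 behave like a shift by 15 (sign fill).
    let count = if imm8 > 15 { 15 } else { imm8 };
    a >> count
}

// Both are usable in const contexts, mirroring what the const intrinsics permit:
const ZEROED: u16 = logical_shift_left_lane(0b0101_1010, 20); // == 0
const SIGN_FILLED: i16 = arithmetic_shift_right_lane(-2, 255); // == -1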
@@ -7441,7 +8051,8 @@ pub fn _mm512_mask_srai_epi16(src: __m512i, k: __mmask32, a: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_srai_epi16(k: __mmask32, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_srai_epi16(k: __mmask32, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = simd_shr(a.as_i16x32(), i16x32::splat(IMM8.min(15) as i16)); @@ -7457,7 +8068,12 @@ pub fn _mm512_maskz_srai_epi16(k: __mmask32, a: __m512i) -> __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_srai_epi16(src: __m256i, k: __mmask16, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_srai_epi16( + src: __m256i, + k: __mmask16, + a: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = simd_shr(a.as_i16x16(), i16x16::splat(IMM8.min(15) as i16)); @@ -7473,7 +8089,8 @@ pub fn _mm256_mask_srai_epi16(src: __m256i, k: __mmask16, a: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_srai_epi16(k: __mmask16, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_srai_epi16(k: __mmask16, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = simd_shr(a.as_i16x16(), i16x16::splat(IMM8.min(15) as i16)); @@ -7489,7 +8106,12 @@ pub fn _mm256_maskz_srai_epi16(k: __mmask16, a: __m256i) -> __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm_mask_srai_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_srai_epi16( + src: __m128i, + k: __mmask8, + a: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = simd_shr(a.as_i16x8(), i16x8::splat(IMM8.min(15) as i16)); @@ -7505,7 +8127,8 @@ pub fn _mm_mask_srai_epi16(src: __m128i, k: __mmask8, a: __m128 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm_maskz_srai_epi16(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_srai_epi16(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = simd_shr(a.as_i16x8(), i16x8::splat(IMM8.min(15) as i16)); @@ -7520,7 +8143,8 @@ pub fn _mm_maskz_srai_epi16(k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravw))] -pub fn _mm512_srav_epi16(a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_srav_epi16(a: __m512i, count: __m512i) -> __m512i { unsafe { let count = count.as_u16x32(); let no_overflow: u16x32 = simd_lt(count, u16x32::splat(u16::BITS as u16)); @@ -7536,7 +8160,13 @@ pub fn _mm512_srav_epi16(a: __m512i, count: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] 
#[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravw))] -pub fn _mm512_mask_srav_epi16(src: __m512i, k: __mmask32, a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_srav_epi16( + src: __m512i, + k: __mmask32, + a: __m512i, + count: __m512i, +) -> __m512i { unsafe { let shf = _mm512_srav_epi16(a, count).as_i16x32(); transmute(simd_select_bitmask(k, shf, src.as_i16x32())) @@ -7550,7 +8180,8 @@ pub fn _mm512_mask_srav_epi16(src: __m512i, k: __mmask32, a: __m512i, count: __m #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravw))] -pub fn _mm512_maskz_srav_epi16(k: __mmask32, a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_srav_epi16(k: __mmask32, a: __m512i, count: __m512i) -> __m512i { unsafe { let shf = _mm512_srav_epi16(a, count).as_i16x32(); transmute(simd_select_bitmask(k, shf, i16x32::ZERO)) @@ -7564,7 +8195,8 @@ pub fn _mm512_maskz_srav_epi16(k: __mmask32, a: __m512i, count: __m512i) -> __m5 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravw))] -pub fn _mm256_srav_epi16(a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_srav_epi16(a: __m256i, count: __m256i) -> __m256i { unsafe { let count = count.as_u16x16(); let no_overflow: u16x16 = simd_lt(count, u16x16::splat(u16::BITS as u16)); @@ -7580,7 +8212,13 @@ pub fn _mm256_srav_epi16(a: __m256i, count: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravw))] -pub fn _mm256_mask_srav_epi16(src: __m256i, k: __mmask16, a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_srav_epi16( + src: __m256i, + k: __mmask16, + a: __m256i, + count: __m256i, +) -> __m256i { unsafe { let shf = _mm256_srav_epi16(a, count).as_i16x16(); transmute(simd_select_bitmask(k, shf, src.as_i16x16())) @@ -7594,7 +8232,8 @@ pub fn _mm256_mask_srav_epi16(src: __m256i, k: __mmask16, a: __m256i, count: __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravw))] -pub fn _mm256_maskz_srav_epi16(k: __mmask16, a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_srav_epi16(k: __mmask16, a: __m256i, count: __m256i) -> __m256i { unsafe { let shf = _mm256_srav_epi16(a, count).as_i16x16(); transmute(simd_select_bitmask(k, shf, i16x16::ZERO)) @@ -7608,7 +8247,8 @@ pub fn _mm256_maskz_srav_epi16(k: __mmask16, a: __m256i, count: __m256i) -> __m2 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravw))] -pub fn _mm_srav_epi16(a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_srav_epi16(a: __m128i, count: __m128i) -> __m128i { unsafe { let count = count.as_u16x8(); let no_overflow: u16x8 = simd_lt(count, u16x8::splat(u16::BITS as u16)); @@ -7624,7 +8264,8 @@ pub fn 
_mm_srav_epi16(a: __m128i, count: __m128i) -> __m128i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravw))] -pub fn _mm_mask_srav_epi16(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_srav_epi16(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { unsafe { let shf = _mm_srav_epi16(a, count).as_i16x8(); transmute(simd_select_bitmask(k, shf, src.as_i16x8())) @@ -7638,7 +8279,8 @@ pub fn _mm_mask_srav_epi16(src: __m128i, k: __mmask8, a: __m128i, count: __m128i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravw))] -pub fn _mm_maskz_srav_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_srav_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { unsafe { let shf = _mm_srav_epi16(a, count).as_i16x8(); transmute(simd_select_bitmask(k, shf, i16x8::ZERO)) @@ -7968,7 +8610,8 @@ pub fn _mm_maskz_permutexvar_epi16(k: __mmask8, idx: __m128i, a: __m128i) -> __m #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqu16))] //should be vpblendmw -pub fn _mm512_mask_blend_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_blend_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_select_bitmask(k, b.as_i16x32(), a.as_i16x32())) } } @@ -7979,7 +8622,8 @@ pub fn _mm512_mask_blend_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqu16))] //should be vpblendmw -pub fn _mm256_mask_blend_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_blend_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_select_bitmask(k, b.as_i16x16(), a.as_i16x16())) } } @@ -7990,7 +8634,8 @@ pub fn _mm256_mask_blend_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqu16))] //should be vpblendmw -pub fn _mm_mask_blend_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_blend_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_select_bitmask(k, b.as_i16x8(), a.as_i16x8())) } } @@ -8001,7 +8646,8 @@ pub fn _mm_mask_blend_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqu8))] //should be vpblendmb -pub fn _mm512_mask_blend_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_blend_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_select_bitmask(k, b.as_i8x64(), a.as_i8x64())) } } @@ -8012,7 
+8658,8 @@ pub fn _mm512_mask_blend_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqu8))] //should be vpblendmb -pub fn _mm256_mask_blend_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_blend_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_select_bitmask(k, b.as_i8x32(), a.as_i8x32())) } } @@ -8023,7 +8670,8 @@ pub fn _mm256_mask_blend_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqu8))] //should be vpblendmb -pub fn _mm_mask_blend_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_blend_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_select_bitmask(k, b.as_i8x16(), a.as_i8x16())) } } @@ -8034,7 +8682,8 @@ pub fn _mm_mask_blend_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastw))] -pub fn _mm512_broadcastw_epi16(a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_broadcastw_epi16(a: __m128i) -> __m512i { unsafe { let a = _mm512_castsi128_si512(a).as_i16x32(); let ret: i16x32 = simd_shuffle!( @@ -8056,7 +8705,8 @@ pub fn _mm512_broadcastw_epi16(a: __m128i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastw))] -pub fn _mm512_mask_broadcastw_epi16(src: __m512i, k: __mmask32, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_broadcastw_epi16(src: __m512i, k: __mmask32, a: __m128i) -> __m512i { unsafe { let broadcast = _mm512_broadcastw_epi16(a).as_i16x32(); transmute(simd_select_bitmask(k, broadcast, src.as_i16x32())) @@ -8070,7 +8720,8 @@ pub fn _mm512_mask_broadcastw_epi16(src: __m512i, k: __mmask32, a: __m128i) -> _ #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastw))] -pub fn _mm512_maskz_broadcastw_epi16(k: __mmask32, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_broadcastw_epi16(k: __mmask32, a: __m128i) -> __m512i { unsafe { let broadcast = _mm512_broadcastw_epi16(a).as_i16x32(); transmute(simd_select_bitmask(k, broadcast, i16x32::ZERO)) @@ -8084,7 +8735,8 @@ pub fn _mm512_maskz_broadcastw_epi16(k: __mmask32, a: __m128i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastw))] -pub fn _mm256_mask_broadcastw_epi16(src: __m256i, k: __mmask16, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_broadcastw_epi16(src: __m256i, k: __mmask16, a: __m128i) -> __m256i { unsafe { let broadcast = _mm256_broadcastw_epi16(a).as_i16x16(); transmute(simd_select_bitmask(k, broadcast, src.as_i16x16())) @@ -8098,7 
+8750,8 @@ pub fn _mm256_mask_broadcastw_epi16(src: __m256i, k: __mmask16, a: __m128i) -> _ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastw))] -pub fn _mm256_maskz_broadcastw_epi16(k: __mmask16, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_broadcastw_epi16(k: __mmask16, a: __m128i) -> __m256i { unsafe { let broadcast = _mm256_broadcastw_epi16(a).as_i16x16(); transmute(simd_select_bitmask(k, broadcast, i16x16::ZERO)) @@ -8112,7 +8765,8 @@ pub fn _mm256_maskz_broadcastw_epi16(k: __mmask16, a: __m128i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastw))] -pub fn _mm_mask_broadcastw_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_broadcastw_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let broadcast = _mm_broadcastw_epi16(a).as_i16x8(); transmute(simd_select_bitmask(k, broadcast, src.as_i16x8())) @@ -8126,7 +8780,8 @@ pub fn _mm_mask_broadcastw_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m12 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastw))] -pub fn _mm_maskz_broadcastw_epi16(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_broadcastw_epi16(k: __mmask8, a: __m128i) -> __m128i { unsafe { let broadcast = _mm_broadcastw_epi16(a).as_i16x8(); transmute(simd_select_bitmask(k, broadcast, i16x8::ZERO)) @@ -8140,7 +8795,8 @@ pub fn _mm_maskz_broadcastw_epi16(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastb))] -pub fn _mm512_broadcastb_epi8(a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_broadcastb_epi8(a: __m128i) -> __m512i { unsafe { let a = _mm512_castsi128_si512(a).as_i8x64(); let ret: i8x64 = simd_shuffle!( @@ -8163,7 +8819,8 @@ pub fn _mm512_broadcastb_epi8(a: __m128i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastb))] -pub fn _mm512_mask_broadcastb_epi8(src: __m512i, k: __mmask64, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_broadcastb_epi8(src: __m512i, k: __mmask64, a: __m128i) -> __m512i { unsafe { let broadcast = _mm512_broadcastb_epi8(a).as_i8x64(); transmute(simd_select_bitmask(k, broadcast, src.as_i8x64())) @@ -8177,7 +8834,8 @@ pub fn _mm512_mask_broadcastb_epi8(src: __m512i, k: __mmask64, a: __m128i) -> __ #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastb))] -pub fn _mm512_maskz_broadcastb_epi8(k: __mmask64, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_broadcastb_epi8(k: __mmask64, a: __m128i) -> __m512i { unsafe { let broadcast = _mm512_broadcastb_epi8(a).as_i8x64(); transmute(simd_select_bitmask(k, broadcast, i8x64::ZERO)) @@ -8191,7 
+8849,8 @@ pub fn _mm512_maskz_broadcastb_epi8(k: __mmask64, a: __m128i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastb))] -pub fn _mm256_mask_broadcastb_epi8(src: __m256i, k: __mmask32, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_broadcastb_epi8(src: __m256i, k: __mmask32, a: __m128i) -> __m256i { unsafe { let broadcast = _mm256_broadcastb_epi8(a).as_i8x32(); transmute(simd_select_bitmask(k, broadcast, src.as_i8x32())) @@ -8205,7 +8864,8 @@ pub fn _mm256_mask_broadcastb_epi8(src: __m256i, k: __mmask32, a: __m128i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastb))] -pub fn _mm256_maskz_broadcastb_epi8(k: __mmask32, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_broadcastb_epi8(k: __mmask32, a: __m128i) -> __m256i { unsafe { let broadcast = _mm256_broadcastb_epi8(a).as_i8x32(); transmute(simd_select_bitmask(k, broadcast, i8x32::ZERO)) @@ -8219,7 +8879,8 @@ pub fn _mm256_maskz_broadcastb_epi8(k: __mmask32, a: __m128i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastb))] -pub fn _mm_mask_broadcastb_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_broadcastb_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m128i { unsafe { let broadcast = _mm_broadcastb_epi8(a).as_i8x16(); transmute(simd_select_bitmask(k, broadcast, src.as_i8x16())) @@ -8233,7 +8894,8 @@ pub fn _mm_mask_broadcastb_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m12 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastb))] -pub fn _mm_maskz_broadcastb_epi8(k: __mmask16, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_broadcastb_epi8(k: __mmask16, a: __m128i) -> __m128i { unsafe { let broadcast = _mm_broadcastb_epi8(a).as_i8x16(); transmute(simd_select_bitmask(k, broadcast, i8x16::ZERO)) @@ -8247,7 +8909,8 @@ pub fn _mm_maskz_broadcastb_epi8(k: __mmask16, a: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhwd))] -pub fn _mm512_unpackhi_epi16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_unpackhi_epi16(a: __m512i, b: __m512i) -> __m512i { unsafe { let a = a.as_i16x32(); let b = b.as_i16x32(); @@ -8277,7 +8940,13 @@ pub fn _mm512_unpackhi_epi16(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhwd))] -pub fn _mm512_mask_unpackhi_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_unpackhi_epi16( + src: __m512i, + k: __mmask32, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { let unpackhi = _mm512_unpackhi_epi16(a, b).as_i16x32(); 
transmute(simd_select_bitmask(k, unpackhi, src.as_i16x32())) @@ -8291,7 +8960,8 @@ pub fn _mm512_mask_unpackhi_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhwd))] -pub fn _mm512_maskz_unpackhi_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_unpackhi_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let unpackhi = _mm512_unpackhi_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, unpackhi, i16x32::ZERO)) @@ -8305,7 +8975,13 @@ pub fn _mm512_maskz_unpackhi_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m5 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhwd))] -pub fn _mm256_mask_unpackhi_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_unpackhi_epi16( + src: __m256i, + k: __mmask16, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { let unpackhi = _mm256_unpackhi_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, unpackhi, src.as_i16x16())) @@ -8319,7 +8995,8 @@ pub fn _mm256_mask_unpackhi_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhwd))] -pub fn _mm256_maskz_unpackhi_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_unpackhi_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let unpackhi = _mm256_unpackhi_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, unpackhi, i16x16::ZERO)) @@ -8333,7 +9010,8 @@ pub fn _mm256_maskz_unpackhi_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m2 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhwd))] -pub fn _mm_mask_unpackhi_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_unpackhi_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let unpackhi = _mm_unpackhi_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, unpackhi, src.as_i16x8())) @@ -8347,7 +9025,8 @@ pub fn _mm_mask_unpackhi_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhwd))] -pub fn _mm_maskz_unpackhi_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_unpackhi_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let unpackhi = _mm_unpackhi_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, unpackhi, i16x8::ZERO)) @@ -8361,7 +9040,8 @@ pub fn _mm_maskz_unpackhi_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhbw))] -pub fn _mm512_unpackhi_epi8(a: __m512i, b: __m512i) -> __m512i 
{ +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_unpackhi_epi8(a: __m512i, b: __m512i) -> __m512i { unsafe { let a = a.as_i8x64(); let b = b.as_i8x64(); @@ -8399,7 +9079,13 @@ pub fn _mm512_unpackhi_epi8(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhbw))] -pub fn _mm512_mask_unpackhi_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_unpackhi_epi8( + src: __m512i, + k: __mmask64, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { let unpackhi = _mm512_unpackhi_epi8(a, b).as_i8x64(); transmute(simd_select_bitmask(k, unpackhi, src.as_i8x64())) @@ -8413,7 +9099,8 @@ pub fn _mm512_mask_unpackhi_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m5 #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhbw))] -pub fn _mm512_maskz_unpackhi_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_unpackhi_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let unpackhi = _mm512_unpackhi_epi8(a, b).as_i8x64(); transmute(simd_select_bitmask(k, unpackhi, i8x64::ZERO)) @@ -8427,7 +9114,13 @@ pub fn _mm512_maskz_unpackhi_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m51 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhbw))] -pub fn _mm256_mask_unpackhi_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_unpackhi_epi8( + src: __m256i, + k: __mmask32, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { let unpackhi = _mm256_unpackhi_epi8(a, b).as_i8x32(); transmute(simd_select_bitmask(k, unpackhi, src.as_i8x32())) @@ -8441,7 +9134,8 @@ pub fn _mm256_mask_unpackhi_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m2 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhbw))] -pub fn _mm256_maskz_unpackhi_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_unpackhi_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let unpackhi = _mm256_unpackhi_epi8(a, b).as_i8x32(); transmute(simd_select_bitmask(k, unpackhi, i8x32::ZERO)) @@ -8455,7 +9149,8 @@ pub fn _mm256_maskz_unpackhi_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m25 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhbw))] -pub fn _mm_mask_unpackhi_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_unpackhi_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let unpackhi = _mm_unpackhi_epi8(a, b).as_i8x16(); transmute(simd_select_bitmask(k, unpackhi, src.as_i8x16())) @@ -8469,7 +9164,8 @@ pub fn _mm_mask_unpackhi_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i #[target_feature(enable = "avx512bw,avx512vl")] 
#[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhbw))] -pub fn _mm_maskz_unpackhi_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_unpackhi_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let unpackhi = _mm_unpackhi_epi8(a, b).as_i8x16(); transmute(simd_select_bitmask(k, unpackhi, i8x16::ZERO)) @@ -8483,7 +9179,8 @@ pub fn _mm_maskz_unpackhi_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklwd))] -pub fn _mm512_unpacklo_epi16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_unpacklo_epi16(a: __m512i, b: __m512i) -> __m512i { unsafe { let a = a.as_i16x32(); let b = b.as_i16x32(); @@ -8513,7 +9210,13 @@ pub fn _mm512_unpacklo_epi16(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklwd))] -pub fn _mm512_mask_unpacklo_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_unpacklo_epi16( + src: __m512i, + k: __mmask32, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { let unpacklo = _mm512_unpacklo_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, unpacklo, src.as_i16x32())) @@ -8527,7 +9230,8 @@ pub fn _mm512_mask_unpacklo_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklwd))] -pub fn _mm512_maskz_unpacklo_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_unpacklo_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { unsafe { let unpacklo = _mm512_unpacklo_epi16(a, b).as_i16x32(); transmute(simd_select_bitmask(k, unpacklo, i16x32::ZERO)) @@ -8541,7 +9245,13 @@ pub fn _mm512_maskz_unpacklo_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m5 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklwd))] -pub fn _mm256_mask_unpacklo_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_unpacklo_epi16( + src: __m256i, + k: __mmask16, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { let unpacklo = _mm256_unpacklo_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, unpacklo, src.as_i16x16())) @@ -8555,7 +9265,8 @@ pub fn _mm256_mask_unpacklo_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklwd))] -pub fn _mm256_maskz_unpacklo_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_unpacklo_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { unsafe { let unpacklo = _mm256_unpacklo_epi16(a, b).as_i16x16(); transmute(simd_select_bitmask(k, unpacklo, i16x16::ZERO)) @@ 
-8569,7 +9280,8 @@ pub fn _mm256_maskz_unpacklo_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m2 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklwd))] -pub fn _mm_mask_unpacklo_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_unpacklo_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let unpacklo = _mm_unpacklo_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, unpacklo, src.as_i16x8())) @@ -8583,7 +9295,8 @@ pub fn _mm_mask_unpacklo_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklwd))] -pub fn _mm_maskz_unpacklo_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_unpacklo_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let unpacklo = _mm_unpacklo_epi16(a, b).as_i16x8(); transmute(simd_select_bitmask(k, unpacklo, i16x8::ZERO)) @@ -8597,7 +9310,8 @@ pub fn _mm_maskz_unpacklo_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklbw))] -pub fn _mm512_unpacklo_epi8(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_unpacklo_epi8(a: __m512i, b: __m512i) -> __m512i { unsafe { let a = a.as_i8x64(); let b = b.as_i8x64(); @@ -8635,7 +9349,13 @@ pub fn _mm512_unpacklo_epi8(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklbw))] -pub fn _mm512_mask_unpacklo_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_unpacklo_epi8( + src: __m512i, + k: __mmask64, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { let unpacklo = _mm512_unpacklo_epi8(a, b).as_i8x64(); transmute(simd_select_bitmask(k, unpacklo, src.as_i8x64())) @@ -8649,7 +9369,8 @@ pub fn _mm512_mask_unpacklo_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m5 #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklbw))] -pub fn _mm512_maskz_unpacklo_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_unpacklo_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { unsafe { let unpacklo = _mm512_unpacklo_epi8(a, b).as_i8x64(); transmute(simd_select_bitmask(k, unpacklo, i8x64::ZERO)) @@ -8663,7 +9384,13 @@ pub fn _mm512_maskz_unpacklo_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m51 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklbw))] -pub fn _mm256_mask_unpacklo_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_unpacklo_epi8( + src: __m256i, + k: __mmask32, + a: __m256i, + b: __m256i, 
+) -> __m256i { unsafe { let unpacklo = _mm256_unpacklo_epi8(a, b).as_i8x32(); transmute(simd_select_bitmask(k, unpacklo, src.as_i8x32())) @@ -8677,7 +9404,8 @@ pub fn _mm256_mask_unpacklo_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m2 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklbw))] -pub fn _mm256_maskz_unpacklo_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_unpacklo_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { unsafe { let unpacklo = _mm256_unpacklo_epi8(a, b).as_i8x32(); transmute(simd_select_bitmask(k, unpacklo, i8x32::ZERO)) @@ -8691,7 +9419,8 @@ pub fn _mm256_maskz_unpacklo_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m25 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklbw))] -pub fn _mm_mask_unpacklo_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_unpacklo_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let unpacklo = _mm_unpacklo_epi8(a, b).as_i8x16(); transmute(simd_select_bitmask(k, unpacklo, src.as_i8x16())) @@ -8705,7 +9434,8 @@ pub fn _mm_mask_unpacklo_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklbw))] -pub fn _mm_maskz_unpacklo_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_unpacklo_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { unsafe { let unpacklo = _mm_unpacklo_epi8(a, b).as_i8x16(); transmute(simd_select_bitmask(k, unpacklo, i8x16::ZERO)) @@ -8719,7 +9449,8 @@ pub fn _mm_maskz_unpacklo_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqu16))] -pub fn _mm512_mask_mov_epi16(src: __m512i, k: __mmask32, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_mov_epi16(src: __m512i, k: __mmask32, a: __m512i) -> __m512i { unsafe { let mov = a.as_i16x32(); transmute(simd_select_bitmask(k, mov, src.as_i16x32())) @@ -8733,7 +9464,8 @@ pub fn _mm512_mask_mov_epi16(src: __m512i, k: __mmask32, a: __m512i) -> __m512i #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqu16))] -pub fn _mm512_maskz_mov_epi16(k: __mmask32, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_mov_epi16(k: __mmask32, a: __m512i) -> __m512i { unsafe { let mov = a.as_i16x32(); transmute(simd_select_bitmask(k, mov, i16x32::ZERO)) @@ -8747,7 +9479,8 @@ pub fn _mm512_maskz_mov_epi16(k: __mmask32, a: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqu16))] -pub fn _mm256_mask_mov_epi16(src: __m256i, k: __mmask16, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub 
const fn _mm256_mask_mov_epi16(src: __m256i, k: __mmask16, a: __m256i) -> __m256i { unsafe { let mov = a.as_i16x16(); transmute(simd_select_bitmask(k, mov, src.as_i16x16())) @@ -8761,7 +9494,8 @@ pub fn _mm256_mask_mov_epi16(src: __m256i, k: __mmask16, a: __m256i) -> __m256i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqu16))] -pub fn _mm256_maskz_mov_epi16(k: __mmask16, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_mov_epi16(k: __mmask16, a: __m256i) -> __m256i { unsafe { let mov = a.as_i16x16(); transmute(simd_select_bitmask(k, mov, i16x16::ZERO)) @@ -8775,7 +9509,8 @@ pub fn _mm256_maskz_mov_epi16(k: __mmask16, a: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqu16))] -pub fn _mm_mask_mov_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mov_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let mov = a.as_i16x8(); transmute(simd_select_bitmask(k, mov, src.as_i16x8())) @@ -8789,7 +9524,8 @@ pub fn _mm_mask_mov_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqu16))] -pub fn _mm_maskz_mov_epi16(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_mov_epi16(k: __mmask8, a: __m128i) -> __m128i { unsafe { let mov = a.as_i16x8(); transmute(simd_select_bitmask(k, mov, i16x8::ZERO)) @@ -8803,7 +9539,8 @@ pub fn _mm_maskz_mov_epi16(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqu8))] -pub fn _mm512_mask_mov_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_mov_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __m512i { unsafe { let mov = a.as_i8x64(); transmute(simd_select_bitmask(k, mov, src.as_i8x64())) @@ -8817,7 +9554,8 @@ pub fn _mm512_mask_mov_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqu8))] -pub fn _mm512_maskz_mov_epi8(k: __mmask64, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_mov_epi8(k: __mmask64, a: __m512i) -> __m512i { unsafe { let mov = a.as_i8x64(); transmute(simd_select_bitmask(k, mov, i8x64::ZERO)) @@ -8831,7 +9569,8 @@ pub fn _mm512_maskz_mov_epi8(k: __mmask64, a: __m512i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqu8))] -pub fn _mm256_mask_mov_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_mov_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __m256i { unsafe { let mov = a.as_i8x32(); transmute(simd_select_bitmask(k, mov, src.as_i8x32())) @@ -8845,7 +9584,8 @@ pub fn 
_mm256_mask_mov_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqu8))] -pub fn _mm256_maskz_mov_epi8(k: __mmask32, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_mov_epi8(k: __mmask32, a: __m256i) -> __m256i { unsafe { let mov = a.as_i8x32(); transmute(simd_select_bitmask(k, mov, i8x32::ZERO)) @@ -8859,7 +9599,8 @@ pub fn _mm256_maskz_mov_epi8(k: __mmask32, a: __m256i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqu8))] -pub fn _mm_mask_mov_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mov_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m128i { unsafe { let mov = a.as_i8x16(); transmute(simd_select_bitmask(k, mov, src.as_i8x16())) @@ -8873,7 +9614,8 @@ pub fn _mm_mask_mov_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m128i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqu8))] -pub fn _mm_maskz_mov_epi8(k: __mmask16, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_mov_epi8(k: __mmask16, a: __m128i) -> __m128i { unsafe { let mov = a.as_i8x16(); transmute(simd_select_bitmask(k, mov, i8x16::ZERO)) @@ -8887,7 +9629,8 @@ pub fn _mm_maskz_mov_epi8(k: __mmask16, a: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastw))] -pub fn _mm512_mask_set1_epi16(src: __m512i, k: __mmask32, a: i16) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_set1_epi16(src: __m512i, k: __mmask32, a: i16) -> __m512i { unsafe { let r = _mm512_set1_epi16(a).as_i16x32(); transmute(simd_select_bitmask(k, r, src.as_i16x32())) @@ -8901,7 +9644,8 @@ pub fn _mm512_mask_set1_epi16(src: __m512i, k: __mmask32, a: i16) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastw))] -pub fn _mm512_maskz_set1_epi16(k: __mmask32, a: i16) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_set1_epi16(k: __mmask32, a: i16) -> __m512i { unsafe { let r = _mm512_set1_epi16(a).as_i16x32(); transmute(simd_select_bitmask(k, r, i16x32::ZERO)) @@ -8915,7 +9659,8 @@ pub fn _mm512_maskz_set1_epi16(k: __mmask32, a: i16) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastw))] -pub fn _mm256_mask_set1_epi16(src: __m256i, k: __mmask16, a: i16) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_set1_epi16(src: __m256i, k: __mmask16, a: i16) -> __m256i { unsafe { let r = _mm256_set1_epi16(a).as_i16x16(); transmute(simd_select_bitmask(k, r, src.as_i16x16())) @@ -8929,7 +9674,8 @@ pub fn _mm256_mask_set1_epi16(src: __m256i, k: __mmask16, a: i16) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = 
"1.89")] #[cfg_attr(test, assert_instr(vpbroadcastw))] -pub fn _mm256_maskz_set1_epi16(k: __mmask16, a: i16) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_set1_epi16(k: __mmask16, a: i16) -> __m256i { unsafe { let r = _mm256_set1_epi16(a).as_i16x16(); transmute(simd_select_bitmask(k, r, i16x16::ZERO)) @@ -8943,7 +9689,8 @@ pub fn _mm256_maskz_set1_epi16(k: __mmask16, a: i16) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastw))] -pub fn _mm_mask_set1_epi16(src: __m128i, k: __mmask8, a: i16) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_set1_epi16(src: __m128i, k: __mmask8, a: i16) -> __m128i { unsafe { let r = _mm_set1_epi16(a).as_i16x8(); transmute(simd_select_bitmask(k, r, src.as_i16x8())) @@ -8957,7 +9704,8 @@ pub fn _mm_mask_set1_epi16(src: __m128i, k: __mmask8, a: i16) -> __m128i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastw))] -pub fn _mm_maskz_set1_epi16(k: __mmask8, a: i16) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_set1_epi16(k: __mmask8, a: i16) -> __m128i { unsafe { let r = _mm_set1_epi16(a).as_i16x8(); transmute(simd_select_bitmask(k, r, i16x8::ZERO)) @@ -8971,7 +9719,8 @@ pub fn _mm_maskz_set1_epi16(k: __mmask8, a: i16) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] -pub fn _mm512_mask_set1_epi8(src: __m512i, k: __mmask64, a: i8) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_set1_epi8(src: __m512i, k: __mmask64, a: i8) -> __m512i { unsafe { let r = _mm512_set1_epi8(a).as_i8x64(); transmute(simd_select_bitmask(k, r, src.as_i8x64())) @@ -8985,7 +9734,8 @@ pub fn _mm512_mask_set1_epi8(src: __m512i, k: __mmask64, a: i8) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] -pub fn _mm512_maskz_set1_epi8(k: __mmask64, a: i8) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_set1_epi8(k: __mmask64, a: i8) -> __m512i { unsafe { let r = _mm512_set1_epi8(a).as_i8x64(); transmute(simd_select_bitmask(k, r, i8x64::ZERO)) @@ -8999,7 +9749,8 @@ pub fn _mm512_maskz_set1_epi8(k: __mmask64, a: i8) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] -pub fn _mm256_mask_set1_epi8(src: __m256i, k: __mmask32, a: i8) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_set1_epi8(src: __m256i, k: __mmask32, a: i8) -> __m256i { unsafe { let r = _mm256_set1_epi8(a).as_i8x32(); transmute(simd_select_bitmask(k, r, src.as_i8x32())) @@ -9013,7 +9764,8 @@ pub fn _mm256_mask_set1_epi8(src: __m256i, k: __mmask32, a: i8) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] -pub fn _mm256_maskz_set1_epi8(k: __mmask32, a: i8) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", 
issue = "149298")] +pub const fn _mm256_maskz_set1_epi8(k: __mmask32, a: i8) -> __m256i { unsafe { let r = _mm256_set1_epi8(a).as_i8x32(); transmute(simd_select_bitmask(k, r, i8x32::ZERO)) @@ -9027,7 +9779,8 @@ pub fn _mm256_maskz_set1_epi8(k: __mmask32, a: i8) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] -pub fn _mm_mask_set1_epi8(src: __m128i, k: __mmask16, a: i8) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_set1_epi8(src: __m128i, k: __mmask16, a: i8) -> __m128i { unsafe { let r = _mm_set1_epi8(a).as_i8x16(); transmute(simd_select_bitmask(k, r, src.as_i8x16())) @@ -9041,7 +9794,8 @@ pub fn _mm_mask_set1_epi8(src: __m128i, k: __mmask16, a: i8) -> __m128i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] -pub fn _mm_maskz_set1_epi8(k: __mmask16, a: i8) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_set1_epi8(k: __mmask16, a: i8) -> __m128i { unsafe { let r = _mm_set1_epi8(a).as_i8x16(); transmute(simd_select_bitmask(k, r, i8x16::ZERO)) @@ -9056,7 +9810,8 @@ pub fn _mm_maskz_set1_epi8(k: __mmask16, a: i8) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 0))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_shufflelo_epi16(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shufflelo_epi16(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let a = a.as_i16x32(); @@ -9110,7 +9865,8 @@ pub fn _mm512_shufflelo_epi16(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 0))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_shufflelo_epi16( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shufflelo_epi16( src: __m512i, k: __mmask32, a: __m512i, @@ -9130,7 +9886,8 @@ pub fn _mm512_mask_shufflelo_epi16( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 0))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_shufflelo_epi16(k: __mmask32, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shufflelo_epi16(k: __mmask32, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = _mm512_shufflelo_epi16::(a); @@ -9146,7 +9903,8 @@ pub fn _mm512_maskz_shufflelo_epi16(k: __mmask32, a: __m512i) - #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_shufflelo_epi16( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shufflelo_epi16( src: __m256i, k: __mmask16, a: __m256i, @@ -9166,7 +9924,8 @@ pub fn _mm256_mask_shufflelo_epi16( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_shufflelo_epi16(k: __mmask16, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shufflelo_epi16(k: __mmask16, a: 
__m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shuffle = _mm256_shufflelo_epi16::(a); @@ -9182,7 +9941,12 @@ pub fn _mm256_maskz_shufflelo_epi16(k: __mmask16, a: __m256i) - #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm_mask_shufflelo_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_shufflelo_epi16( + src: __m128i, + k: __mmask8, + a: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shuffle = _mm_shufflelo_epi16::(a); @@ -9198,7 +9962,8 @@ pub fn _mm_mask_shufflelo_epi16(src: __m128i, k: __mmask8, a: _ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm_maskz_shufflelo_epi16(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_shufflelo_epi16(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shuffle = _mm_shufflelo_epi16::(a); @@ -9214,7 +9979,8 @@ pub fn _mm_maskz_shufflelo_epi16(k: __mmask8, a: __m128i) -> __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 0))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_shufflehi_epi16(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shufflehi_epi16(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let a = a.as_i16x32(); @@ -9268,7 +10034,8 @@ pub fn _mm512_shufflehi_epi16(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 0))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_shufflehi_epi16( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shufflehi_epi16( src: __m512i, k: __mmask32, a: __m512i, @@ -9288,7 +10055,8 @@ pub fn _mm512_mask_shufflehi_epi16( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 0))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_shufflehi_epi16(k: __mmask32, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shufflehi_epi16(k: __mmask32, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = _mm512_shufflehi_epi16::(a); @@ -9304,7 +10072,8 @@ pub fn _mm512_maskz_shufflehi_epi16(k: __mmask32, a: __m512i) - #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_shufflehi_epi16( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shufflehi_epi16( src: __m256i, k: __mmask16, a: __m256i, @@ -9324,7 +10093,8 @@ pub fn _mm256_mask_shufflehi_epi16( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_shufflehi_epi16(k: __mmask16, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shufflehi_epi16(k: __mmask16, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let 
shuffle = _mm256_shufflehi_epi16::(a); @@ -9340,7 +10110,12 @@ pub fn _mm256_maskz_shufflehi_epi16(k: __mmask16, a: __m256i) - #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm_mask_shufflehi_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_shufflehi_epi16( + src: __m128i, + k: __mmask8, + a: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shuffle = _mm_shufflehi_epi16::(a); @@ -9356,7 +10131,8 @@ pub fn _mm_mask_shufflehi_epi16(src: __m128i, k: __mmask8, a: _ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm_maskz_shufflehi_epi16(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_shufflehi_epi16(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shuffle = _mm_shufflehi_epi16::(a); @@ -9466,7 +10242,8 @@ pub fn _mm_maskz_shuffle_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmw))] -pub fn _mm512_test_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_test_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { let and = _mm512_and_si512(a, b); let zero = _mm512_setzero_si512(); _mm512_cmpneq_epi16_mask(and, zero) @@ -9479,7 +10256,8 @@ pub fn _mm512_test_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmw))] -pub fn _mm512_mask_test_epi16_mask(k: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_test_epi16_mask(k: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { let and = _mm512_and_si512(a, b); let zero = _mm512_setzero_si512(); _mm512_mask_cmpneq_epi16_mask(k, and, zero) @@ -9492,7 +10270,8 @@ pub fn _mm512_mask_test_epi16_mask(k: __mmask32, a: __m512i, b: __m512i) -> __mm #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmw))] -pub fn _mm256_test_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_test_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { let and = _mm256_and_si256(a, b); let zero = _mm256_setzero_si256(); _mm256_cmpneq_epi16_mask(and, zero) @@ -9505,7 +10284,8 @@ pub fn _mm256_test_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmw))] -pub fn _mm256_mask_test_epi16_mask(k: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_test_epi16_mask(k: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { let and = _mm256_and_si256(a, b); let zero = _mm256_setzero_si256(); _mm256_mask_cmpneq_epi16_mask(k, and, zero) @@ -9518,7 +10298,8 @@ pub fn 
_mm256_mask_test_epi16_mask(k: __mmask16, a: __m256i, b: __m256i) -> __mm #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmw))] -pub fn _mm_test_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_test_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); let zero = _mm_setzero_si128(); _mm_cmpneq_epi16_mask(and, zero) @@ -9531,7 +10312,8 @@ pub fn _mm_test_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmw))] -pub fn _mm_mask_test_epi16_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_test_epi16_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); let zero = _mm_setzero_si128(); _mm_mask_cmpneq_epi16_mask(k, and, zero) @@ -9544,7 +10326,8 @@ pub fn _mm_mask_test_epi16_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmb))] -pub fn _mm512_test_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_test_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { let and = _mm512_and_si512(a, b); let zero = _mm512_setzero_si512(); _mm512_cmpneq_epi8_mask(and, zero) @@ -9557,7 +10340,8 @@ pub fn _mm512_test_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmb))] -pub fn _mm512_mask_test_epi8_mask(k: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_test_epi8_mask(k: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { let and = _mm512_and_si512(a, b); let zero = _mm512_setzero_si512(); _mm512_mask_cmpneq_epi8_mask(k, and, zero) @@ -9570,7 +10354,8 @@ pub fn _mm512_mask_test_epi8_mask(k: __mmask64, a: __m512i, b: __m512i) -> __mma #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmb))] -pub fn _mm256_test_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_test_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { let and = _mm256_and_si256(a, b); let zero = _mm256_setzero_si256(); _mm256_cmpneq_epi8_mask(and, zero) @@ -9583,7 +10368,8 @@ pub fn _mm256_test_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmb))] -pub fn _mm256_mask_test_epi8_mask(k: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_test_epi8_mask(k: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { let and = _mm256_and_si256(a, b); let zero = _mm256_setzero_si256(); _mm256_mask_cmpneq_epi8_mask(k, and, zero) @@ -9596,7 +10382,8 @@ pub fn _mm256_mask_test_epi8_mask(k: __mmask32, a: __m256i, b: 
__m256i) -> __mma #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmb))] -pub fn _mm_test_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_test_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { let and = _mm_and_si128(a, b); let zero = _mm_setzero_si128(); _mm_cmpneq_epi8_mask(and, zero) @@ -9609,7 +10396,8 @@ pub fn _mm_test_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmb))] -pub fn _mm_mask_test_epi8_mask(k: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_test_epi8_mask(k: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { let and = _mm_and_si128(a, b); let zero = _mm_setzero_si128(); _mm_mask_cmpneq_epi8_mask(k, and, zero) @@ -9622,7 +10410,8 @@ pub fn _mm_mask_test_epi8_mask(k: __mmask16, a: __m128i, b: __m128i) -> __mmask1 #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmw))] -pub fn _mm512_testn_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_testn_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { let and = _mm512_and_si512(a, b); let zero = _mm512_setzero_si512(); _mm512_cmpeq_epi16_mask(and, zero) @@ -9635,7 +10424,8 @@ pub fn _mm512_testn_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmw))] -pub fn _mm512_mask_testn_epi16_mask(k: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_testn_epi16_mask(k: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { let and = _mm512_and_si512(a, b); let zero = _mm512_setzero_si512(); _mm512_mask_cmpeq_epi16_mask(k, and, zero) @@ -9648,7 +10438,8 @@ pub fn _mm512_mask_testn_epi16_mask(k: __mmask32, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmw))] -pub fn _mm256_testn_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_testn_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { let and = _mm256_and_si256(a, b); let zero = _mm256_setzero_si256(); _mm256_cmpeq_epi16_mask(and, zero) @@ -9661,7 +10452,8 @@ pub fn _mm256_testn_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmw))] -pub fn _mm256_mask_testn_epi16_mask(k: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_testn_epi16_mask(k: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { let and = _mm256_and_si256(a, b); let zero = _mm256_setzero_si256(); _mm256_mask_cmpeq_epi16_mask(k, and, zero) @@ -9674,7 +10466,8 @@ pub fn _mm256_mask_testn_epi16_mask(k: __mmask16, a: __m256i, b: __m256i) -> __m 
#[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmw))] -pub fn _mm_testn_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_testn_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); let zero = _mm_setzero_si128(); _mm_cmpeq_epi16_mask(and, zero) @@ -9687,7 +10480,8 @@ pub fn _mm_testn_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmw))] -pub fn _mm_mask_testn_epi16_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_testn_epi16_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); let zero = _mm_setzero_si128(); _mm_mask_cmpeq_epi16_mask(k, and, zero) @@ -9700,7 +10494,8 @@ pub fn _mm_mask_testn_epi16_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmb))] -pub fn _mm512_testn_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_testn_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { let and = _mm512_and_si512(a, b); let zero = _mm512_setzero_si512(); _mm512_cmpeq_epi8_mask(and, zero) @@ -9713,7 +10508,8 @@ pub fn _mm512_testn_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmb))] -pub fn _mm512_mask_testn_epi8_mask(k: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_testn_epi8_mask(k: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { let and = _mm512_and_si512(a, b); let zero = _mm512_setzero_si512(); _mm512_mask_cmpeq_epi8_mask(k, and, zero) @@ -9726,7 +10522,8 @@ pub fn _mm512_mask_testn_epi8_mask(k: __mmask64, a: __m512i, b: __m512i) -> __mm #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmb))] -pub fn _mm256_testn_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_testn_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { let and = _mm256_and_si256(a, b); let zero = _mm256_setzero_si256(); _mm256_cmpeq_epi8_mask(and, zero) @@ -9739,7 +10536,8 @@ pub fn _mm256_testn_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmb))] -pub fn _mm256_mask_testn_epi8_mask(k: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_testn_epi8_mask(k: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { let and = _mm256_and_si256(a, b); let zero = _mm256_setzero_si256(); _mm256_mask_cmpeq_epi8_mask(k, and, zero) @@ -9752,7 +10550,8 @@ pub fn _mm256_mask_testn_epi8_mask(k: __mmask32, a: __m256i, b: __m256i) -> __mm #[target_feature(enable = "avx512bw,avx512vl")] 
#[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmb))] -pub fn _mm_testn_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_testn_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { let and = _mm_and_si128(a, b); let zero = _mm_setzero_si128(); _mm_cmpeq_epi8_mask(and, zero) @@ -9765,7 +10564,8 @@ pub fn _mm_testn_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmb))] -pub fn _mm_mask_testn_epi8_mask(k: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_testn_epi8_mask(k: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { let and = _mm_and_si128(a, b); let zero = _mm_setzero_si128(); _mm_mask_cmpeq_epi8_mask(k, and, zero) @@ -9778,7 +10578,8 @@ pub fn _mm_mask_testn_epi8_mask(k: __mmask16, a: __m128i, b: __m128i) -> __mmask #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(mov))] //should be kmovq -pub unsafe fn _store_mask64(mem_addr: *mut __mmask64, a: __mmask64) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _store_mask64(mem_addr: *mut __mmask64, a: __mmask64) { ptr::write(mem_addr as *mut __mmask64, a); } @@ -9789,7 +10590,8 @@ pub unsafe fn _store_mask64(mem_addr: *mut __mmask64, a: __mmask64) { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(mov))] //should be kmovd -pub unsafe fn _store_mask32(mem_addr: *mut __mmask32, a: __mmask32) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _store_mask32(mem_addr: *mut __mmask32, a: __mmask32) { ptr::write(mem_addr as *mut __mmask32, a); } @@ -9800,7 +10602,8 @@ pub unsafe fn _store_mask32(mem_addr: *mut __mmask32, a: __mmask32) { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(mov))] //should be kmovq -pub unsafe fn _load_mask64(mem_addr: *const __mmask64) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _load_mask64(mem_addr: *const __mmask64) -> __mmask64 { ptr::read(mem_addr as *const __mmask64) } @@ -9811,7 +10614,8 @@ pub unsafe fn _load_mask64(mem_addr: *const __mmask64) -> __mmask64 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(mov))] //should be kmovd -pub unsafe fn _load_mask32(mem_addr: *const __mmask32) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _load_mask32(mem_addr: *const __mmask32) -> __mmask32 { ptr::read(mem_addr as *const __mmask32) } @@ -10010,7 +10814,8 @@ pub fn _mm_maskz_dbsad_epu8(k: __mmask8, a: __m128i, b: __m128i #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovw2m))] -pub fn _mm512_movepi16_mask(a: __m512i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_movepi16_mask(a: __m512i) -> __mmask32 { let filter = _mm512_set1_epi16(1 << 15); let a = _mm512_and_si512(a, filter); 
_mm512_cmpeq_epi16_mask(a, filter) @@ -10023,7 +10828,8 @@ pub fn _mm512_movepi16_mask(a: __m512i) -> __mmask32 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovw2m))] -pub fn _mm256_movepi16_mask(a: __m256i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_movepi16_mask(a: __m256i) -> __mmask16 { let filter = _mm256_set1_epi16(1 << 15); let a = _mm256_and_si256(a, filter); _mm256_cmpeq_epi16_mask(a, filter) @@ -10036,7 +10842,8 @@ pub fn _mm256_movepi16_mask(a: __m256i) -> __mmask16 { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovw2m))] -pub fn _mm_movepi16_mask(a: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_movepi16_mask(a: __m128i) -> __mmask8 { let filter = _mm_set1_epi16(1 << 15); let a = _mm_and_si128(a, filter); _mm_cmpeq_epi16_mask(a, filter) @@ -10049,7 +10856,8 @@ pub fn _mm_movepi16_mask(a: __m128i) -> __mmask8 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovb2m))] -pub fn _mm512_movepi8_mask(a: __m512i) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_movepi8_mask(a: __m512i) -> __mmask64 { let filter = _mm512_set1_epi8(1 << 7); let a = _mm512_and_si512(a, filter); _mm512_cmpeq_epi8_mask(a, filter) @@ -10061,9 +10869,11 @@ pub fn _mm512_movepi8_mask(a: __m512i) -> __mmask64 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -#[cfg_attr(test, assert_instr(vpmovmskb))] // should be vpmovb2m but compiled to vpmovmskb in the test shim because that takes less cycles than +#[cfg_attr(test, assert_instr(vpmovmskb))] +// should be vpmovb2m but compiled to vpmovmskb in the test shim because that takes less cycles than // using vpmovb2m plus converting the mask register to a standard register. -pub fn _mm256_movepi8_mask(a: __m256i) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_movepi8_mask(a: __m256i) -> __mmask32 { let filter = _mm256_set1_epi8(1 << 7); let a = _mm256_and_si256(a, filter); _mm256_cmpeq_epi8_mask(a, filter) @@ -10075,9 +10885,11 @@ pub fn _mm256_movepi8_mask(a: __m256i) -> __mmask32 { #[inline] #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -#[cfg_attr(test, assert_instr(vpmovmskb))] // should be vpmovb2m but compiled to vpmovmskb in the test shim because that takes less cycles than +#[cfg_attr(test, assert_instr(vpmovmskb))] +// should be vpmovb2m but compiled to vpmovmskb in the test shim because that takes less cycles than // using vpmovb2m plus converting the mask register to a standard register. 
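// Illustrative note, not part of the patch: for values already in a register,
// `_mm256_movepi8_mask` is bit-for-bit the byte movemask hinted at above, i.e.
// `_mm256_movepi8_mask(a) == _mm256_movemask_epi8(a) as u32` for every `a`
// (and analogously for the 128-bit variant). Under the unstable gate this
// patch introduces it should also fold in const context; a hypothetical,
// unchecked sketch:
//
//     // assumes #![feature(stdarch_const_x86)] plus avx512bw,avx512vl
//     const ALL_SIGN_BITS: __mmask32 =
//         unsafe { _mm256_movepi8_mask(_mm256_set1_epi8(-1)) };
//     // ALL_SIGN_BITS == u32::MAX, since every byte lane is negative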
-pub fn _mm_movepi8_mask(a: __m128i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_movepi8_mask(a: __m128i) -> __mmask16 { let filter = _mm_set1_epi8(1 << 7); let a = _mm_and_si128(a, filter); _mm_cmpeq_epi8_mask(a, filter) @@ -10090,7 +10902,8 @@ pub fn _mm_movepi8_mask(a: __m128i) -> __mmask16 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovm2w))] -pub fn _mm512_movm_epi16(k: __mmask32) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_movm_epi16(k: __mmask32) -> __m512i { unsafe { let one = _mm512_set1_epi16( 1 << 15 @@ -10122,7 +10935,8 @@ pub fn _mm512_movm_epi16(k: __mmask32) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovm2w))] -pub fn _mm256_movm_epi16(k: __mmask16) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_movm_epi16(k: __mmask16) -> __m256i { unsafe { let one = _mm256_set1_epi16( 1 << 15 @@ -10154,7 +10968,8 @@ pub fn _mm256_movm_epi16(k: __mmask16) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovm2w))] -pub fn _mm_movm_epi16(k: __mmask8) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_movm_epi16(k: __mmask8) -> __m128i { unsafe { let one = _mm_set1_epi16( 1 << 15 @@ -10186,7 +11001,8 @@ pub fn _mm_movm_epi16(k: __mmask8) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovm2b))] -pub fn _mm512_movm_epi8(k: __mmask64) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_movm_epi8(k: __mmask64) -> __m512i { unsafe { let one = _mm512_set1_epi8(1 << 7 | 1 << 6 | 1 << 5 | 1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0) @@ -10202,7 +11018,8 @@ pub fn _mm512_movm_epi8(k: __mmask64) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovm2b))] -pub fn _mm256_movm_epi8(k: __mmask32) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_movm_epi8(k: __mmask32) -> __m256i { unsafe { let one = _mm256_set1_epi8(1 << 7 | 1 << 6 | 1 << 5 | 1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0) @@ -10218,7 +11035,8 @@ pub fn _mm256_movm_epi8(k: __mmask32) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovm2b))] -pub fn _mm_movm_epi8(k: __mmask16) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_movm_epi8(k: __mmask16) -> __m128i { unsafe { let one = _mm_set1_epi8(1 << 7 | 1 << 6 | 1 << 5 | 1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0) @@ -10233,7 +11051,8 @@ pub fn _mm_movm_epi8(k: __mmask16) -> __m128i { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _cvtmask32_u32(a: __mmask32) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _cvtmask32_u32(a: __mmask32) -> u32 { a } @@ -10243,7 +11062,8 @@ pub fn 
_cvtmask32_u32(a: __mmask32) -> u32 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _cvtu32_mask32(a: u32) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _cvtu32_mask32(a: u32) -> __mmask32 { a } @@ -10253,7 +11073,8 @@ pub fn _cvtu32_mask32(a: u32) -> __mmask32 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kadd_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kadd_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { a.wrapping_add(b) } @@ -10263,7 +11084,8 @@ pub fn _kadd_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kadd_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kadd_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { a.wrapping_add(b) } @@ -10273,7 +11095,8 @@ pub fn _kadd_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kand_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kand_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { a & b } @@ -10283,7 +11106,8 @@ pub fn _kand_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kand_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kand_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { a & b } @@ -10293,7 +11117,8 @@ pub fn _kand_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _knot_mask32(a: __mmask32) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _knot_mask32(a: __mmask32) -> __mmask32 { !a } @@ -10303,7 +11128,8 @@ pub fn _knot_mask32(a: __mmask32) -> __mmask32 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _knot_mask64(a: __mmask64) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _knot_mask64(a: __mmask64) -> __mmask64 { !a } @@ -10313,7 +11139,8 @@ pub fn _knot_mask64(a: __mmask64) -> __mmask64 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kandn_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kandn_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { _knot_mask32(a) & b } @@ -10323,7 +11150,8 @@ pub fn _kandn_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kandn_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kandn_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { _knot_mask64(a) & b } @@ 
-10333,7 +11161,8 @@ pub fn _kandn_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { a | b } @@ -10343,7 +11172,8 @@ pub fn _kor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { a | b } @@ -10353,7 +11183,8 @@ pub fn _kor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kxor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kxor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { a ^ b } @@ -10363,7 +11194,8 @@ pub fn _kxor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kxor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kxor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { a ^ b } @@ -10373,7 +11205,8 @@ pub fn _kxor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kxnor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kxnor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { _knot_mask32(a ^ b) } @@ -10383,7 +11216,8 @@ pub fn _kxnor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kxnor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kxnor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { _knot_mask64(a ^ b) } @@ -10394,7 +11228,8 @@ pub fn _kxnor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _kortest_mask32_u8(a: __mmask32, b: __mmask32, all_ones: *mut u8) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _kortest_mask32_u8(a: __mmask32, b: __mmask32, all_ones: *mut u8) -> u8 { let tmp = _kor_mask32(a, b); *all_ones = (tmp == 0xffffffff) as u8; (tmp == 0) as u8 @@ -10407,7 +11242,8 @@ pub unsafe fn _kortest_mask32_u8(a: __mmask32, b: __mmask32, all_ones: *mut u8) #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _kortest_mask64_u8(a: __mmask64, b: __mmask64, all_ones: *mut u8) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _kortest_mask64_u8(a: __mmask64, b: __mmask64, all_ones: *mut u8) -> u8 { let tmp = _kor_mask64(a, b); *all_ones = (tmp == 
0xffffffff_ffffffff) as u8; (tmp == 0) as u8 @@ -10420,7 +11256,8 @@ pub unsafe fn _kortest_mask64_u8(a: __mmask64, b: __mmask64, all_ones: *mut u8) #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kortestc_mask32_u8(a: __mmask32, b: __mmask32) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kortestc_mask32_u8(a: __mmask32, b: __mmask32) -> u8 { (_kor_mask32(a, b) == 0xffffffff) as u8 } @@ -10431,7 +11268,8 @@ pub fn _kortestc_mask32_u8(a: __mmask32, b: __mmask32) -> u8 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kortestc_mask64_u8(a: __mmask64, b: __mmask64) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kortestc_mask64_u8(a: __mmask64, b: __mmask64) -> u8 { (_kor_mask64(a, b) == 0xffffffff_ffffffff) as u8 } @@ -10442,7 +11280,8 @@ pub fn _kortestc_mask64_u8(a: __mmask64, b: __mmask64) -> u8 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kortestz_mask32_u8(a: __mmask32, b: __mmask32) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kortestz_mask32_u8(a: __mmask32, b: __mmask32) -> u8 { (_kor_mask32(a, b) == 0) as u8 } @@ -10453,7 +11292,8 @@ pub fn _kortestz_mask32_u8(a: __mmask32, b: __mmask32) -> u8 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kortestz_mask64_u8(a: __mmask64, b: __mmask64) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kortestz_mask64_u8(a: __mmask64, b: __mmask64) -> u8 { (_kor_mask64(a, b) == 0) as u8 } @@ -10464,7 +11304,8 @@ pub fn _kortestz_mask64_u8(a: __mmask64, b: __mmask64) -> u8 { #[target_feature(enable = "avx512bw")] #[rustc_legacy_const_generics(1)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kshiftli_mask32(a: __mmask32) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kshiftli_mask32(a: __mmask32) -> __mmask32 { a.unbounded_shl(COUNT) } @@ -10475,7 +11316,8 @@ pub fn _kshiftli_mask32(a: __mmask32) -> __mmask32 { #[target_feature(enable = "avx512bw")] #[rustc_legacy_const_generics(1)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kshiftli_mask64(a: __mmask64) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kshiftli_mask64(a: __mmask64) -> __mmask64 { a.unbounded_shl(COUNT) } @@ -10486,7 +11328,8 @@ pub fn _kshiftli_mask64(a: __mmask64) -> __mmask64 { #[target_feature(enable = "avx512bw")] #[rustc_legacy_const_generics(1)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kshiftri_mask32(a: __mmask32) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kshiftri_mask32(a: __mmask32) -> __mmask32 { a.unbounded_shr(COUNT) } @@ -10497,7 +11340,8 @@ pub fn _kshiftri_mask32(a: __mmask32) -> __mmask32 { #[target_feature(enable = "avx512bw")] #[rustc_legacy_const_generics(1)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kshiftri_mask64(a: __mmask64) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kshiftri_mask64(a: __mmask64) -> __mmask64 { a.unbounded_shr(COUNT) } @@ -10509,7 +11353,8 @@ pub fn 
_kshiftri_mask64(a: __mmask64) -> __mmask64 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _ktest_mask32_u8(a: __mmask32, b: __mmask32, and_not: *mut u8) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _ktest_mask32_u8(a: __mmask32, b: __mmask32, and_not: *mut u8) -> u8 { *and_not = (_kandn_mask32(a, b) == 0) as u8; (_kand_mask32(a, b) == 0) as u8 } @@ -10522,7 +11367,8 @@ pub unsafe fn _ktest_mask32_u8(a: __mmask32, b: __mmask32, and_not: *mut u8) -> #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _ktest_mask64_u8(a: __mmask64, b: __mmask64, and_not: *mut u8) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _ktest_mask64_u8(a: __mmask64, b: __mmask64, and_not: *mut u8) -> u8 { *and_not = (_kandn_mask64(a, b) == 0) as u8; (_kand_mask64(a, b) == 0) as u8 } @@ -10534,7 +11380,8 @@ pub unsafe fn _ktest_mask64_u8(a: __mmask64, b: __mmask64, and_not: *mut u8) -> #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _ktestc_mask32_u8(a: __mmask32, b: __mmask32) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _ktestc_mask32_u8(a: __mmask32, b: __mmask32) -> u8 { (_kandn_mask32(a, b) == 0) as u8 } @@ -10545,7 +11392,8 @@ pub fn _ktestc_mask32_u8(a: __mmask32, b: __mmask32) -> u8 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _ktestc_mask64_u8(a: __mmask64, b: __mmask64) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _ktestc_mask64_u8(a: __mmask64, b: __mmask64) -> u8 { (_kandn_mask64(a, b) == 0) as u8 } @@ -10556,7 +11404,8 @@ pub fn _ktestc_mask64_u8(a: __mmask64, b: __mmask64) -> u8 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _ktestz_mask32_u8(a: __mmask32, b: __mmask32) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _ktestz_mask32_u8(a: __mmask32, b: __mmask32) -> u8 { (_kand_mask32(a, b) == 0) as u8 } @@ -10567,7 +11416,8 @@ pub fn _ktestz_mask32_u8(a: __mmask32, b: __mmask32) -> u8 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _ktestz_mask64_u8(a: __mmask64, b: __mmask64) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _ktestz_mask64_u8(a: __mmask64, b: __mmask64) -> u8 { (_kand_mask64(a, b) == 0) as u8 } @@ -10578,7 +11428,8 @@ pub fn _ktestz_mask64_u8(a: __mmask64, b: __mmask64) -> u8 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(mov))] // generate normal and code instead of kunpckwd -pub fn _mm512_kunpackw(a: __mmask32, b: __mmask32) -> __mmask32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_kunpackw(a: __mmask32, b: __mmask32) -> __mmask32 { ((a & 0xffff) << 16) | (b & 0xffff) } @@ -10589,7 +11440,8 @@ pub fn _mm512_kunpackw(a: __mmask32, b: __mmask32) -> __mmask32 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(mov))] // generate normal and code instead of 
kunpckdq -pub fn _mm512_kunpackd(a: __mmask64, b: __mmask64) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_kunpackd(a: __mmask64, b: __mmask64) -> __mmask64 { ((a & 0xffffffff) << 32) | (b & 0xffffffff) } @@ -10600,7 +11452,8 @@ pub fn _mm512_kunpackd(a: __mmask64, b: __mmask64) -> __mmask64 { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovwb))] -pub fn _mm512_cvtepi16_epi8(a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepi16_epi8(a: __m512i) -> __m256i { unsafe { let a = a.as_i16x32(); transmute::<i8x32, _>(simd_cast(a)) @@ -10614,7 +11467,8 @@ pub fn _mm512_cvtepi16_epi8(a: __m512i) -> __m256i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovwb))] -pub fn _mm512_mask_cvtepi16_epi8(src: __m256i, k: __mmask32, a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepi16_epi8(src: __m256i, k: __mmask32, a: __m512i) -> __m256i { unsafe { let convert = _mm512_cvtepi16_epi8(a).as_i8x32(); transmute(simd_select_bitmask(k, convert, src.as_i8x32())) @@ -10628,7 +11482,8 @@ pub fn _mm512_mask_cvtepi16_epi8(src: __m256i, k: __mmask32, a: __m512i) -> __m2 #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovwb))] -pub fn _mm512_maskz_cvtepi16_epi8(k: __mmask32, a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepi16_epi8(k: __mmask32, a: __m512i) -> __m256i { unsafe { let convert = _mm512_cvtepi16_epi8(a).as_i8x32(); transmute(simd_select_bitmask(k, convert, i8x32::ZERO)) @@ -10642,7 +11497,8 @@ pub fn _mm512_maskz_cvtepi16_epi8(k: __mmask32, a: __m512i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovwb))] -pub fn _mm256_cvtepi16_epi8(a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtepi16_epi8(a: __m256i) -> __m128i { unsafe { let a = a.as_i16x16(); transmute::<i8x16, _>(simd_cast(a)) @@ -10656,7 +11512,8 @@ pub fn _mm256_cvtepi16_epi8(a: __m256i) -> __m128i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovwb))] -pub fn _mm256_mask_cvtepi16_epi8(src: __m128i, k: __mmask16, a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cvtepi16_epi8(src: __m128i, k: __mmask16, a: __m256i) -> __m128i { unsafe { let convert = _mm256_cvtepi16_epi8(a).as_i8x16(); transmute(simd_select_bitmask(k, convert, src.as_i8x16())) @@ -10670,7 +11527,8 @@ pub fn _mm256_mask_cvtepi16_epi8(src: __m128i, k: __mmask16, a: __m256i) -> __m1 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovwb))] -pub fn _mm256_maskz_cvtepi16_epi8(k: __mmask16, a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_cvtepi16_epi8(k: __mmask16, a: __m256i) -> __m128i { unsafe { let convert = _mm256_cvtepi16_epi8(a).as_i8x16(); 
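// Illustrative aside, not part of the patch: every mask_*/maskz_* body above uses
// simd_select_bitmask(k, new, fallback), where lane i takes the freshly computed
// value when bit i of k is set and the fallback (src, or zero for maskz_) otherwise.
// A hypothetical single-lane model:
const fn select_bitmask_lane(k: u32, i: u32, computed: i8, fallback: i8) -> i8 {
    if (k >> i) & 1 == 1 { computed } else { fallback }
}
const _: () = assert!(select_bitmask_lane(0b10, 1, 7, -1) == 7);
const _: () = assert!(select_bitmask_lane(0b10, 0, 7, -1) == -1);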
transmute(simd_select_bitmask(k, convert, i8x16::ZERO)) @@ -10684,7 +11542,8 @@ pub fn _mm256_maskz_cvtepi16_epi8(k: __mmask16, a: __m256i) -> __m128i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovwb))] -pub fn _mm_cvtepi16_epi8(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtepi16_epi8(a: __m128i) -> __m128i { unsafe { let a = a.as_i16x8(); let v256: i16x16 = simd_shuffle!( @@ -10703,7 +11562,8 @@ pub fn _mm_cvtepi16_epi8(a: __m128i) -> __m128i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovwb))] -pub fn _mm_mask_cvtepi16_epi8(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cvtepi16_epi8(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let a = _mm_cvtepi16_epi8(a).as_i8x16(); let src = simd_shuffle!( @@ -10722,7 +11582,8 @@ pub fn _mm_mask_cvtepi16_epi8(src: __m128i, k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovwb))] -pub fn _mm_maskz_cvtepi16_epi8(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_cvtepi16_epi8(k: __mmask8, a: __m128i) -> __m128i { _mm_mask_cvtepi16_epi8(_mm_setzero_si128(), k, a) } @@ -10733,7 +11594,8 @@ pub fn _mm_maskz_cvtepi16_epi8(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovswb))] -pub fn _mm512_cvtsepi16_epi8(a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub const fn _mm512_cvtsepi16_epi8(a: __m512i) -> __m256i { unsafe { simd_cast::<_, i8x32>(simd_imax( simd_imin(a.as_i16x32(), i16x32::splat(i8::MAX as _)), @@ -10750,7 +11612,8 @@ pub fn _mm512_cvtsepi16_epi8(a: __m512i) -> __m256i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovswb))] -pub fn _mm512_mask_cvtsepi16_epi8(src: __m256i, k: __mmask32, a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub const fn _mm512_mask_cvtsepi16_epi8(src: __m256i, k: __mmask32, a: __m512i) -> __m256i { unsafe { simd_select_bitmask(k, _mm512_cvtsepi16_epi8(a).as_i8x32(), src.as_i8x32()).as_m256i() } @@ -10763,7 +11626,8 @@ pub fn _mm512_mask_cvtsepi16_epi8(src: __m256i, k: __mmask32, a: __m512i) -> __m #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovswb))] -pub fn _mm512_maskz_cvtsepi16_epi8(k: __mmask32, a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub const fn _mm512_maskz_cvtsepi16_epi8(k: __mmask32, a: __m512i) -> __m256i { unsafe { simd_select_bitmask(k, _mm512_cvtsepi16_epi8(a).as_i8x32(), i8x32::ZERO).as_m256i() } } @@ -10774,7 +11638,8 @@ pub fn _mm512_maskz_cvtsepi16_epi8(k: __mmask32, a: __m512i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovswb))] -pub fn 
_mm256_cvtsepi16_epi8(a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub const fn _mm256_cvtsepi16_epi8(a: __m256i) -> __m128i { unsafe { simd_cast::<_, i8x16>(simd_imax( simd_imin(a.as_i16x16(), i16x16::splat(i8::MAX as _)), @@ -10791,7 +11656,8 @@ pub fn _mm256_cvtsepi16_epi8(a: __m256i) -> __m128i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovswb))] -pub fn _mm256_mask_cvtsepi16_epi8(src: __m128i, k: __mmask16, a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub const fn _mm256_mask_cvtsepi16_epi8(src: __m128i, k: __mmask16, a: __m256i) -> __m128i { unsafe { simd_select_bitmask(k, _mm256_cvtsepi16_epi8(a).as_i8x16(), src.as_i8x16()).as_m128i() } @@ -10804,7 +11670,8 @@ pub fn _mm256_mask_cvtsepi16_epi8(src: __m128i, k: __mmask16, a: __m256i) -> __m #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovswb))] -pub fn _mm256_maskz_cvtsepi16_epi8(k: __mmask16, a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub const fn _mm256_maskz_cvtsepi16_epi8(k: __mmask16, a: __m256i) -> __m128i { unsafe { simd_select_bitmask(k, _mm256_cvtsepi16_epi8(a).as_i8x16(), i8x16::ZERO).as_m128i() } } @@ -10848,7 +11715,8 @@ pub fn _mm_maskz_cvtsepi16_epi8(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovuswb))] -pub fn _mm512_cvtusepi16_epi8(a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub const fn _mm512_cvtusepi16_epi8(a: __m512i) -> __m256i { unsafe { simd_cast::<_, u8x32>(simd_imin(a.as_u16x32(), u16x32::splat(u8::MAX as _))).as_m256i() } @@ -10861,7 +11729,8 @@ pub fn _mm512_cvtusepi16_epi8(a: __m512i) -> __m256i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovuswb))] -pub fn _mm512_mask_cvtusepi16_epi8(src: __m256i, k: __mmask32, a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub const fn _mm512_mask_cvtusepi16_epi8(src: __m256i, k: __mmask32, a: __m512i) -> __m256i { unsafe { simd_select_bitmask(k, _mm512_cvtusepi16_epi8(a).as_u8x32(), src.as_u8x32()).as_m256i() } @@ -10874,7 +11743,8 @@ pub fn _mm512_mask_cvtusepi16_epi8(src: __m256i, k: __mmask32, a: __m512i) -> __ #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovuswb))] -pub fn _mm512_maskz_cvtusepi16_epi8(k: __mmask32, a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub const fn _mm512_maskz_cvtusepi16_epi8(k: __mmask32, a: __m512i) -> __m256i { unsafe { simd_select_bitmask(k, _mm512_cvtusepi16_epi8(a).as_u8x32(), u8x32::ZERO).as_m256i() } } @@ -10885,7 +11755,8 @@ pub fn _mm512_maskz_cvtusepi16_epi8(k: __mmask32, a: __m512i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovuswb))] -pub fn _mm256_cvtusepi16_epi8(a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub const fn _mm256_cvtusepi16_epi8(a: 
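// Illustrative aside, not part of the patch: the saturating narrows above clamp each
// 16-bit lane into the 8-bit range (simd_imin/simd_imax against splatted i8::MAX/i8::MIN,
// or just u8::MAX for the unsigned form) before the cast. A hypothetical per-lane model:
const fn cvtsepi16_epi8_lane(x: i16) -> i8 {
    if x > i8::MAX as i16 { i8::MAX } else if x < i8::MIN as i16 { i8::MIN } else { x as i8 }
}
const fn cvtusepi16_epi8_lane(x: u16) -> u8 {
    if x > u8::MAX as u16 { u8::MAX } else { x as u8 }
}
const _: () = assert!(cvtsepi16_epi8_lane(1000) == i8::MAX);
const _: () = assert!(cvtusepi16_epi8_lane(300) == u8::MAX);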
__m256i) -> __m128i { unsafe { simd_cast::<_, u8x16>(simd_imin(a.as_u16x16(), u16x16::splat(u8::MAX as _))).as_m128i() } @@ -10898,7 +11769,8 @@ pub fn _mm256_cvtusepi16_epi8(a: __m256i) -> __m128i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovuswb))] -pub fn _mm256_mask_cvtusepi16_epi8(src: __m128i, k: __mmask16, a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub const fn _mm256_mask_cvtusepi16_epi8(src: __m128i, k: __mmask16, a: __m256i) -> __m128i { unsafe { simd_select_bitmask(k, _mm256_cvtusepi16_epi8(a).as_u8x16(), src.as_u8x16()).as_m128i() } @@ -10911,7 +11783,8 @@ pub fn _mm256_mask_cvtusepi16_epi8(src: __m128i, k: __mmask16, a: __m256i) -> __ #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovuswb))] -pub fn _mm256_maskz_cvtusepi16_epi8(k: __mmask16, a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub const fn _mm256_maskz_cvtusepi16_epi8(k: __mmask16, a: __m256i) -> __m128i { unsafe { simd_select_bitmask(k, _mm256_cvtusepi16_epi8(a).as_u8x16(), u8x16::ZERO).as_m128i() } } @@ -10955,7 +11828,8 @@ pub fn _mm_maskz_cvtusepi16_epi8(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbw))] -pub fn _mm512_cvtepi8_epi16(a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepi8_epi16(a: __m256i) -> __m512i { unsafe { let a = a.as_i8x32(); transmute::<i16x32, _>(simd_cast(a)) @@ -10969,7 +11843,8 @@ pub fn _mm512_cvtepi8_epi16(a: __m256i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbw))] -pub fn _mm512_mask_cvtepi8_epi16(src: __m512i, k: __mmask32, a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepi8_epi16(src: __m512i, k: __mmask32, a: __m256i) -> __m512i { unsafe { let convert = _mm512_cvtepi8_epi16(a).as_i16x32(); transmute(simd_select_bitmask(k, convert, src.as_i16x32())) @@ -10983,7 +11858,8 @@ pub fn _mm512_mask_cvtepi8_epi16(src: __m512i, k: __mmask32, a: __m256i) -> __m5 #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbw))] -pub fn _mm512_maskz_cvtepi8_epi16(k: __mmask32, a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepi8_epi16(k: __mmask32, a: __m256i) -> __m512i { unsafe { let convert = _mm512_cvtepi8_epi16(a).as_i16x32(); transmute(simd_select_bitmask(k, convert, i16x32::ZERO)) @@ -10997,7 +11873,8 @@ pub fn _mm512_maskz_cvtepi8_epi16(k: __mmask32, a: __m256i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbw))] -pub fn _mm256_mask_cvtepi8_epi16(src: __m256i, k: __mmask16, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cvtepi8_epi16(src: __m256i, k: __mmask16, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepi8_epi16(a).as_i16x16(); transmute(simd_select_bitmask(k, 
convert, src.as_i16x16())) @@ -11011,7 +11888,8 @@ pub fn _mm256_mask_cvtepi8_epi16(src: __m256i, k: __mmask16, a: __m128i) -> __m2 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbw))] -pub fn _mm256_maskz_cvtepi8_epi16(k: __mmask16, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_cvtepi8_epi16(k: __mmask16, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepi8_epi16(a).as_i16x16(); transmute(simd_select_bitmask(k, convert, i16x16::ZERO)) @@ -11025,7 +11903,8 @@ pub fn _mm256_maskz_cvtepi8_epi16(k: __mmask16, a: __m128i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbw))] -pub fn _mm_mask_cvtepi8_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cvtepi8_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepi8_epi16(a).as_i16x8(); transmute(simd_select_bitmask(k, convert, src.as_i16x8())) @@ -11039,7 +11918,8 @@ pub fn _mm_mask_cvtepi8_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbw))] -pub fn _mm_maskz_cvtepi8_epi16(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_cvtepi8_epi16(k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepi8_epi16(a).as_i16x8(); transmute(simd_select_bitmask(k, convert, i16x8::ZERO)) @@ -11053,7 +11933,8 @@ pub fn _mm_maskz_cvtepi8_epi16(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbw))] -pub fn _mm512_cvtepu8_epi16(a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepu8_epi16(a: __m256i) -> __m512i { unsafe { let a = a.as_u8x32(); transmute::<i16x32, _>(simd_cast(a)) @@ -11067,7 +11948,8 @@ pub fn _mm512_cvtepu8_epi16(a: __m256i) -> __m512i { #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbw))] -pub fn _mm512_mask_cvtepu8_epi16(src: __m512i, k: __mmask32, a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepu8_epi16(src: __m512i, k: __mmask32, a: __m256i) -> __m512i { unsafe { let convert = _mm512_cvtepu8_epi16(a).as_i16x32(); transmute(simd_select_bitmask(k, convert, src.as_i16x32())) @@ -11081,7 +11963,8 @@ pub fn _mm512_mask_cvtepu8_epi16(src: __m512i, k: __mmask32, a: __m256i) -> __m5 #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbw))] -pub fn _mm512_maskz_cvtepu8_epi16(k: __mmask32, a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepu8_epi16(k: __mmask32, a: __m256i) -> __m512i { unsafe { let convert = _mm512_cvtepu8_epi16(a).as_i16x32(); transmute(simd_select_bitmask(k, convert, i16x32::ZERO)) @@ -11095,7 +11978,8 @@ pub fn _mm512_maskz_cvtepu8_epi16(k: __mmask32, a: 
__m256i) -> __m512i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbw))] -pub fn _mm256_mask_cvtepu8_epi16(src: __m256i, k: __mmask16, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cvtepu8_epi16(src: __m256i, k: __mmask16, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepu8_epi16(a).as_i16x16(); transmute(simd_select_bitmask(k, convert, src.as_i16x16())) @@ -11109,7 +11993,8 @@ pub fn _mm256_mask_cvtepu8_epi16(src: __m256i, k: __mmask16, a: __m128i) -> __m2 #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbw))] -pub fn _mm256_maskz_cvtepu8_epi16(k: __mmask16, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_cvtepu8_epi16(k: __mmask16, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepu8_epi16(a).as_i16x16(); transmute(simd_select_bitmask(k, convert, i16x16::ZERO)) @@ -11123,7 +12008,8 @@ pub fn _mm256_maskz_cvtepu8_epi16(k: __mmask16, a: __m128i) -> __m256i { #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbw))] -pub fn _mm_mask_cvtepu8_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cvtepu8_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepu8_epi16(a).as_i16x8(); transmute(simd_select_bitmask(k, convert, src.as_i16x8())) @@ -11137,7 +12023,8 @@ pub fn _mm_mask_cvtepu8_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbw))] -pub fn _mm_maskz_cvtepu8_epi16(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_cvtepu8_epi16(k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepu8_epi16(a).as_i16x8(); transmute(simd_select_bitmask(k, convert, i16x8::ZERO)) @@ -11152,7 +12039,8 @@ pub fn _mm_maskz_cvtepu8_epi16(k: __mmask8, a: __m128i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpslldq, IMM8 = 3))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_bslli_epi128<const IMM8: i32>(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_bslli_epi128<const IMM8: i32>(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); const fn mask(shift: i32, i: u32) -> u32 { @@ -11247,7 +12135,8 @@ pub fn _mm512_bslli_epi128(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrldq, IMM8 = 3))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); const fn mask(shift: i32, i: u32) -> u32 { @@ -11344,7 +12233,8 @@ pub fn _mm512_bsrli_epi128(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))] 
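// Illustrative aside, not part of the patch: _mm512_bslli_epi128/_mm512_bsrli_epi128
// take IMM8 as a const generic (the legacy-const-generics form) and reject out-of-range
// values at compile time via static_assert_uimm_bits!. A hypothetical stand-alone sketch
// of that shape, shifting a single 128-bit lane left by IMM8 bytes:
const fn bslli_lane_model<const IMM8: i32>(x: u128) -> u128 {
    assert!(IMM8 >= 0 && IMM8 < 256); // stands in for static_assert_uimm_bits!(IMM8, 8)
    if IMM8 >= 16 { 0 } else { x << (IMM8 * 8) }
}
const _: () = assert!(bslli_lane_model::<3>(0x01) == 0x0100_0000);
const _: () = assert!(bslli_lane_model::<16>(0xff) == 0);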
#[rustc_legacy_const_generics(2)] -pub fn _mm512_alignr_epi8<const IMM8: u32>(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_alignr_epi8<const IMM8: u32>(a: __m512i, b: __m512i) -> __m512i { const fn mask(shift: u32, i: u32) -> u32 { let shift = shift % 16; let mod_i = i % 16; @@ -11454,7 +12344,8 @@ pub fn _mm512_alignr_epi8(a: __m512i, b: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))] #[rustc_legacy_const_generics(4)] -pub fn _mm512_mask_alignr_epi8<const IMM8: u32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_alignr_epi8<const IMM8: u32>( src: __m512i, k: __mmask64, a: __m512i, @@ -11475,7 +12366,8 @@ pub fn _mm512_mask_alignr_epi8( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_alignr_epi8<const IMM8: u32>(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_alignr_epi8<const IMM8: u32>( + k: __mmask64, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = _mm512_alignr_epi8::<IMM8>(a, b); @@ -11491,7 +12387,8 @@ pub fn _mm512_maskz_alignr_epi8(k: __mmask64, a: __m512i, b: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(4)] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 5))] -pub fn _mm256_mask_alignr_epi8<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_alignr_epi8<const IMM8: i32>( src: __m256i, k: __mmask32, a: __m256i, @@ -11512,7 +12409,8 @@ pub fn _mm256_mask_alignr_epi8( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 5))] -pub fn _mm256_maskz_alignr_epi8<const IMM8: i32>(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_alignr_epi8<const IMM8: i32>( + k: __mmask32, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = _mm256_alignr_epi8::<IMM8>(a, b); @@ -11528,7 +12430,8 @@ pub fn _mm256_maskz_alignr_epi8(k: __mmask32, a: __m256i, b: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(4)] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 5))] -pub fn _mm_mask_alignr_epi8<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_alignr_epi8<const IMM8: i32>( src: __m128i, k: __mmask16, a: __m128i, @@ -11549,7 +12452,8 @@ pub fn _mm_mask_alignr_epi8( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 5))] -pub fn _mm_maskz_alignr_epi8<const IMM8: i32>(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_alignr_epi8<const IMM8: i32>( + k: __mmask16, + a: __m128i, + b: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = _mm_alignr_epi8::<IMM8>(a, b); @@ -11597,7 +12505,8 @@ pub unsafe fn _mm_mask_cvtsepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovwb))] -pub unsafe fn _mm512_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32, a: __m512i) { +#[rustc_const_unstable(feature = 
"stdarch_const_helpers", issue = "none")] +pub const unsafe fn _mm512_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32, a: __m512i) { let result = _mm512_cvtepi16_epi8(a).as_i8x32(); let mask = simd_select_bitmask(k, i8x32::splat(!0), i8x32::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, result); @@ -11610,7 +12519,8 @@ pub unsafe fn _mm512_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32, #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovwb))] -pub unsafe fn _mm256_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16, a: __m256i) { +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub const unsafe fn _mm256_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16, a: __m256i) { let result = _mm256_cvtepi16_epi8(a).as_i8x16(); let mask = simd_select_bitmask(k, i8x16::splat(!0), i8x16::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, result); @@ -11623,7 +12533,8 @@ pub unsafe fn _mm256_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16, #[target_feature(enable = "avx512bw,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovwb))] -pub unsafe fn _mm_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub const unsafe fn _mm_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) { let result: i8x8 = simd_shuffle!( _mm_cvtepi16_epi8(a).as_i8x16(), i8x16::ZERO, @@ -11742,6 +12653,7 @@ unsafe extern "C" { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; @@ -11750,7 +12662,7 @@ mod tests { use crate::mem::{self}; #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_abs_epi16() { + const unsafe fn test_mm512_abs_epi16() { let a = _mm512_set1_epi16(-1); let r = _mm512_abs_epi16(a); let e = _mm512_set1_epi16(1); @@ -11758,7 +12670,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_abs_epi16() { + const unsafe fn test_mm512_mask_abs_epi16() { let a = _mm512_set1_epi16(-1); let r = _mm512_mask_abs_epi16(a, 0, a); assert_eq_m512i(r, a); @@ -11770,7 +12682,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_abs_epi16() { + const unsafe fn test_mm512_maskz_abs_epi16() { let a = _mm512_set1_epi16(-1); let r = _mm512_maskz_abs_epi16(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -11782,7 +12694,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_abs_epi16() { + const unsafe fn test_mm256_mask_abs_epi16() { let a = _mm256_set1_epi16(-1); let r = _mm256_mask_abs_epi16(a, 0, a); assert_eq_m256i(r, a); @@ -11792,7 +12704,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_abs_epi16() { + const unsafe fn test_mm256_maskz_abs_epi16() { let a = _mm256_set1_epi16(-1); let r = _mm256_maskz_abs_epi16(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -11802,7 +12714,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_abs_epi16() { + const unsafe fn test_mm_mask_abs_epi16() { let a = _mm_set1_epi16(-1); let r = _mm_mask_abs_epi16(a, 0, a); assert_eq_m128i(r, a); @@ -11812,7 +12724,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_abs_epi16() { + const unsafe fn test_mm_maskz_abs_epi16() { let a = 
_mm_set1_epi16(-1); let r = _mm_maskz_abs_epi16(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -11822,7 +12734,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_abs_epi8() { + const unsafe fn test_mm512_abs_epi8() { let a = _mm512_set1_epi8(-1); let r = _mm512_abs_epi8(a); let e = _mm512_set1_epi8(1); @@ -11830,7 +12742,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_abs_epi8() { + const unsafe fn test_mm512_mask_abs_epi8() { let a = _mm512_set1_epi8(-1); let r = _mm512_mask_abs_epi8(a, 0, a); assert_eq_m512i(r, a); @@ -11848,7 +12760,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_abs_epi8() { + const unsafe fn test_mm512_maskz_abs_epi8() { let a = _mm512_set1_epi8(-1); let r = _mm512_maskz_abs_epi8(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -11865,7 +12777,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_abs_epi8() { + const unsafe fn test_mm256_mask_abs_epi8() { let a = _mm256_set1_epi8(-1); let r = _mm256_mask_abs_epi8(a, 0, a); assert_eq_m256i(r, a); @@ -11877,7 +12789,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_abs_epi8() { + const unsafe fn test_mm256_maskz_abs_epi8() { let a = _mm256_set1_epi8(-1); let r = _mm256_maskz_abs_epi8(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -11889,7 +12801,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_abs_epi8() { + const unsafe fn test_mm_mask_abs_epi8() { let a = _mm_set1_epi8(-1); let r = _mm_mask_abs_epi8(a, 0, a); assert_eq_m128i(r, a); @@ -11899,7 +12811,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_abs_epi8() { + const unsafe fn test_mm_maskz_abs_epi8() { let a = _mm_set1_epi8(-1); let r = _mm_maskz_abs_epi8(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -11910,7 +12822,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_add_epi16() { + const unsafe fn test_mm512_add_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(2); let r = _mm512_add_epi16(a, b); @@ -11919,7 +12831,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_add_epi16() { + const unsafe fn test_mm512_mask_add_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(2); let r = _mm512_mask_add_epi16(a, 0, a, b); @@ -11932,7 +12844,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_add_epi16() { + const unsafe fn test_mm512_maskz_add_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(2); let r = _mm512_maskz_add_epi16(0, a, b); @@ -11945,7 +12857,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_add_epi16() { + const unsafe fn test_mm256_mask_add_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(2); let r = _mm256_mask_add_epi16(a, 0, a, b); @@ -11956,7 +12868,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_add_epi16() { + const unsafe fn test_mm256_maskz_add_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(2); let r = _mm256_maskz_add_epi16(0, a, b); @@ -11967,7 +12879,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_add_epi16() { + const unsafe fn test_mm_mask_add_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(2); let r = _mm_mask_add_epi16(a, 0, a, b); @@ -11978,7 +12890,7 @@ mod tests { } #[simd_test(enable 
= "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_add_epi16() { + const unsafe fn test_mm_maskz_add_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(2); let r = _mm_maskz_add_epi16(0, a, b); @@ -11989,7 +12901,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_add_epi8() { + const unsafe fn test_mm512_add_epi8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(2); let r = _mm512_add_epi8(a, b); @@ -11998,7 +12910,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_add_epi8() { + const unsafe fn test_mm512_mask_add_epi8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(2); let r = _mm512_mask_add_epi8(a, 0, a, b); @@ -12018,7 +12930,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_add_epi8() { + const unsafe fn test_mm512_maskz_add_epi8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(2); let r = _mm512_maskz_add_epi8(0, a, b); @@ -12037,7 +12949,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_add_epi8() { + const unsafe fn test_mm256_mask_add_epi8() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(2); let r = _mm256_mask_add_epi8(a, 0, a, b); @@ -12050,7 +12962,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_add_epi8() { + const unsafe fn test_mm256_maskz_add_epi8() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(2); let r = _mm256_maskz_add_epi8(0, a, b); @@ -12063,7 +12975,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_add_epi8() { + const unsafe fn test_mm_mask_add_epi8() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(2); let r = _mm_mask_add_epi8(a, 0, a, b); @@ -12074,7 +12986,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_add_epi8() { + const unsafe fn test_mm_maskz_add_epi8() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(2); let r = _mm_maskz_add_epi8(0, a, b); @@ -12085,7 +12997,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_adds_epu16() { + const unsafe fn test_mm512_adds_epu16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(u16::MAX as i16); let r = _mm512_adds_epu16(a, b); @@ -12094,7 +13006,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_adds_epu16() { + const unsafe fn test_mm512_mask_adds_epu16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(u16::MAX as i16); let r = _mm512_mask_adds_epu16(a, 0, a, b); @@ -12107,7 +13019,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_adds_epu16() { + const unsafe fn test_mm512_maskz_adds_epu16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(u16::MAX as i16); let r = _mm512_maskz_adds_epu16(0, a, b); @@ -12120,7 +13032,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_adds_epu16() { + const unsafe fn test_mm256_mask_adds_epu16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(u16::MAX as i16); let r = _mm256_mask_adds_epu16(a, 0, a, b); @@ -12132,7 +13044,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_adds_epu16() { + const unsafe fn test_mm256_maskz_adds_epu16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(u16::MAX as i16); let r = _mm256_maskz_adds_epu16(0, a, b); @@ -12144,7 +13056,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_adds_epu16() { + const unsafe fn 
test_mm_mask_adds_epu16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(u16::MAX as i16); let r = _mm_mask_adds_epu16(a, 0, a, b); @@ -12156,7 +13068,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_adds_epu16() { + const unsafe fn test_mm_maskz_adds_epu16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(u16::MAX as i16); let r = _mm_maskz_adds_epu16(0, a, b); @@ -12168,7 +13080,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_adds_epu8() { + const unsafe fn test_mm512_adds_epu8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(u8::MAX as i8); let r = _mm512_adds_epu8(a, b); @@ -12177,7 +13089,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_adds_epu8() { + const unsafe fn test_mm512_mask_adds_epu8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(u8::MAX as i8); let r = _mm512_mask_adds_epu8(a, 0, a, b); @@ -12197,7 +13109,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_adds_epu8() { + const unsafe fn test_mm512_maskz_adds_epu8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(u8::MAX as i8); let r = _mm512_maskz_adds_epu8(0, a, b); @@ -12216,7 +13128,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_adds_epu8() { + const unsafe fn test_mm256_mask_adds_epu8() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(u8::MAX as i8); let r = _mm256_mask_adds_epu8(a, 0, a, b); @@ -12229,7 +13141,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_adds_epu8() { + const unsafe fn test_mm256_maskz_adds_epu8() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(u8::MAX as i8); let r = _mm256_maskz_adds_epu8(0, a, b); @@ -12242,7 +13154,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_adds_epu8() { + const unsafe fn test_mm_mask_adds_epu8() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(u8::MAX as i8); let r = _mm_mask_adds_epu8(a, 0, a, b); @@ -12254,7 +13166,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_adds_epu8() { + const unsafe fn test_mm_maskz_adds_epu8() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(u8::MAX as i8); let r = _mm_maskz_adds_epu8(0, a, b); @@ -12266,7 +13178,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_adds_epi16() { + const unsafe fn test_mm512_adds_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(i16::MAX); let r = _mm512_adds_epi16(a, b); @@ -12275,7 +13187,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_adds_epi16() { + const unsafe fn test_mm512_mask_adds_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(i16::MAX); let r = _mm512_mask_adds_epi16(a, 0, a, b); @@ -12288,7 +13200,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_adds_epi16() { + const unsafe fn test_mm512_maskz_adds_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(i16::MAX); let r = _mm512_maskz_adds_epi16(0, a, b); @@ -12301,7 +13213,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_adds_epi16() { + const unsafe fn test_mm256_mask_adds_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(i16::MAX); let r = _mm256_mask_adds_epi16(a, 0, a, b); @@ -12313,7 +13225,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_adds_epi16() { + const unsafe fn 
test_mm256_maskz_adds_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(i16::MAX); let r = _mm256_maskz_adds_epi16(0, a, b); @@ -12325,7 +13237,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_adds_epi16() { + const unsafe fn test_mm_mask_adds_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(i16::MAX); let r = _mm_mask_adds_epi16(a, 0, a, b); @@ -12336,7 +13248,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_adds_epi16() { + const unsafe fn test_mm_maskz_adds_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(i16::MAX); let r = _mm_maskz_adds_epi16(0, a, b); @@ -12347,7 +13259,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_adds_epi8() { + const unsafe fn test_mm512_adds_epi8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(i8::MAX); let r = _mm512_adds_epi8(a, b); @@ -12356,7 +13268,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_adds_epi8() { + const unsafe fn test_mm512_mask_adds_epi8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(i8::MAX); let r = _mm512_mask_adds_epi8(a, 0, a, b); @@ -12376,7 +13288,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_adds_epi8() { + const unsafe fn test_mm512_maskz_adds_epi8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(i8::MAX); let r = _mm512_maskz_adds_epi8(0, a, b); @@ -12395,7 +13307,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_adds_epi8() { + const unsafe fn test_mm256_mask_adds_epi8() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(i8::MAX); let r = _mm256_mask_adds_epi8(a, 0, a, b); @@ -12408,7 +13320,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_adds_epi8() { + const unsafe fn test_mm256_maskz_adds_epi8() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(i8::MAX); let r = _mm256_maskz_adds_epi8(0, a, b); @@ -12421,7 +13333,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_adds_epi8() { + const unsafe fn test_mm_mask_adds_epi8() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(i8::MAX); let r = _mm_mask_adds_epi8(a, 0, a, b); @@ -12433,7 +13345,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_adds_epi8() { + const unsafe fn test_mm_maskz_adds_epi8() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(i8::MAX); let r = _mm_maskz_adds_epi8(0, a, b); @@ -12445,7 +13357,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_sub_epi16() { + const unsafe fn test_mm512_sub_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(2); let r = _mm512_sub_epi16(a, b); @@ -12454,7 +13366,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_sub_epi16() { + const unsafe fn test_mm512_mask_sub_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(2); let r = _mm512_mask_sub_epi16(a, 0, a, b); @@ -12467,7 +13379,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_sub_epi16() { + const unsafe fn test_mm512_maskz_sub_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(2); let r = _mm512_maskz_sub_epi16(0, a, b); @@ -12480,7 +13392,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_sub_epi16() { + const unsafe fn test_mm256_mask_sub_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(2); 
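// Illustrative aside, not part of the patch: the adds_*/subs_* tests in this module rely
// on saturating arithmetic -- e.g. u16::MAX + 1 stays at u16::MAX and 1 - u16::MAX bottoms
// out at 0 -- which the scalar saturating_add/saturating_sub methods model directly for a
// single lane:
const _: () = assert!(1u16.saturating_add(u16::MAX) == u16::MAX);
const _: () = assert!(1u16.saturating_sub(u16::MAX) == 0);
const _: () = assert!(1i8.saturating_add(i8::MAX) == i8::MAX);
const _: () = assert!((-1i16).saturating_sub(i16::MAX) == i16::MIN);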
let r = _mm256_mask_sub_epi16(a, 0, a, b); @@ -12491,7 +13403,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_sub_epi16() { + const unsafe fn test_mm256_maskz_sub_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(2); let r = _mm256_maskz_sub_epi16(0, a, b); @@ -12502,7 +13414,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_sub_epi16() { + const unsafe fn test_mm_mask_sub_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(2); let r = _mm_mask_sub_epi16(a, 0, a, b); @@ -12513,7 +13425,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_sub_epi16() { + const unsafe fn test_mm_maskz_sub_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(2); let r = _mm_maskz_sub_epi16(0, a, b); @@ -12524,7 +13436,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_sub_epi8() { + const unsafe fn test_mm512_sub_epi8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(2); let r = _mm512_sub_epi8(a, b); @@ -12533,7 +13445,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_sub_epi8() { + const unsafe fn test_mm512_mask_sub_epi8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(2); let r = _mm512_mask_sub_epi8(a, 0, a, b); @@ -12553,7 +13465,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_sub_epi8() { + const unsafe fn test_mm512_maskz_sub_epi8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(2); let r = _mm512_maskz_sub_epi8(0, a, b); @@ -12572,7 +13484,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_sub_epi8() { + const unsafe fn test_mm256_mask_sub_epi8() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(2); let r = _mm256_mask_sub_epi8(a, 0, a, b); @@ -12585,7 +13497,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_sub_epi8() { + const unsafe fn test_mm256_maskz_sub_epi8() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(2); let r = _mm256_maskz_sub_epi8(0, a, b); @@ -12598,7 +13510,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_sub_epi8() { + const unsafe fn test_mm_mask_sub_epi8() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(2); let r = _mm_mask_sub_epi8(a, 0, a, b); @@ -12609,7 +13521,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_sub_epi8() { + const unsafe fn test_mm_maskz_sub_epi8() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(2); let r = _mm_maskz_sub_epi8(0, a, b); @@ -12620,7 +13532,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_subs_epu16() { + const unsafe fn test_mm512_subs_epu16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(u16::MAX as i16); let r = _mm512_subs_epu16(a, b); @@ -12629,7 +13541,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_subs_epu16() { + const unsafe fn test_mm512_mask_subs_epu16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(u16::MAX as i16); let r = _mm512_mask_subs_epu16(a, 0, a, b); @@ -12642,7 +13554,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_subs_epu16() { + const unsafe fn test_mm512_maskz_subs_epu16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(u16::MAX as i16); let r = _mm512_maskz_subs_epu16(0, a, b); @@ -12655,7 +13567,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe 
fn test_mm256_mask_subs_epu16() { + const unsafe fn test_mm256_mask_subs_epu16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(u16::MAX as i16); let r = _mm256_mask_subs_epu16(a, 0, a, b); @@ -12666,7 +13578,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_subs_epu16() { + const unsafe fn test_mm256_maskz_subs_epu16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(u16::MAX as i16); let r = _mm256_maskz_subs_epu16(0, a, b); @@ -12677,7 +13589,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_subs_epu16() { + const unsafe fn test_mm_mask_subs_epu16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(u16::MAX as i16); let r = _mm_mask_subs_epu16(a, 0, a, b); @@ -12688,7 +13600,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_subs_epu16() { + const unsafe fn test_mm_maskz_subs_epu16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(u16::MAX as i16); let r = _mm_maskz_subs_epu16(0, a, b); @@ -12699,7 +13611,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_subs_epu8() { + const unsafe fn test_mm512_subs_epu8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(u8::MAX as i8); let r = _mm512_subs_epu8(a, b); @@ -12708,7 +13620,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_subs_epu8() { + const unsafe fn test_mm512_mask_subs_epu8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(u8::MAX as i8); let r = _mm512_mask_subs_epu8(a, 0, a, b); @@ -12728,7 +13640,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_subs_epu8() { + const unsafe fn test_mm512_maskz_subs_epu8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(u8::MAX as i8); let r = _mm512_maskz_subs_epu8(0, a, b); @@ -12747,7 +13659,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_subs_epu8() { + const unsafe fn test_mm256_mask_subs_epu8() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(u8::MAX as i8); let r = _mm256_mask_subs_epu8(a, 0, a, b); @@ -12760,7 +13672,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_subs_epu8() { + const unsafe fn test_mm256_maskz_subs_epu8() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(u8::MAX as i8); let r = _mm256_maskz_subs_epu8(0, a, b); @@ -12773,7 +13685,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_subs_epu8() { + const unsafe fn test_mm_mask_subs_epu8() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(u8::MAX as i8); let r = _mm_mask_subs_epu8(a, 0, a, b); @@ -12784,7 +13696,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_subs_epu8() { + const unsafe fn test_mm_maskz_subs_epu8() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(u8::MAX as i8); let r = _mm_maskz_subs_epu8(0, a, b); @@ -12795,7 +13707,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_subs_epi16() { + const unsafe fn test_mm512_subs_epi16() { let a = _mm512_set1_epi16(-1); let b = _mm512_set1_epi16(i16::MAX); let r = _mm512_subs_epi16(a, b); @@ -12804,7 +13716,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_subs_epi16() { + const unsafe fn test_mm512_mask_subs_epi16() { let a = _mm512_set1_epi16(-1); let b = _mm512_set1_epi16(i16::MAX); let r = _mm512_mask_subs_epi16(a, 0, a, b); @@ -12817,7 +13729,7 @@ mod tests { } #[simd_test(enable = 
"avx512bw")] - unsafe fn test_mm512_maskz_subs_epi16() { + const unsafe fn test_mm512_maskz_subs_epi16() { let a = _mm512_set1_epi16(-1); let b = _mm512_set1_epi16(i16::MAX); let r = _mm512_maskz_subs_epi16(0, a, b); @@ -12830,7 +13742,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_subs_epi16() { + const unsafe fn test_mm256_mask_subs_epi16() { let a = _mm256_set1_epi16(-1); let b = _mm256_set1_epi16(i16::MAX); let r = _mm256_mask_subs_epi16(a, 0, a, b); @@ -12842,7 +13754,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_subs_epi16() { + const unsafe fn test_mm256_maskz_subs_epi16() { let a = _mm256_set1_epi16(-1); let b = _mm256_set1_epi16(i16::MAX); let r = _mm256_maskz_subs_epi16(0, a, b); @@ -12854,7 +13766,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_subs_epi16() { + const unsafe fn test_mm_mask_subs_epi16() { let a = _mm_set1_epi16(-1); let b = _mm_set1_epi16(i16::MAX); let r = _mm_mask_subs_epi16(a, 0, a, b); @@ -12865,7 +13777,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_subs_epi16() { + const unsafe fn test_mm_maskz_subs_epi16() { let a = _mm_set1_epi16(-1); let b = _mm_set1_epi16(i16::MAX); let r = _mm_maskz_subs_epi16(0, a, b); @@ -12876,7 +13788,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_subs_epi8() { + const unsafe fn test_mm512_subs_epi8() { let a = _mm512_set1_epi8(-1); let b = _mm512_set1_epi8(i8::MAX); let r = _mm512_subs_epi8(a, b); @@ -12885,7 +13797,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_subs_epi8() { + const unsafe fn test_mm512_mask_subs_epi8() { let a = _mm512_set1_epi8(-1); let b = _mm512_set1_epi8(i8::MAX); let r = _mm512_mask_subs_epi8(a, 0, a, b); @@ -12905,7 +13817,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_subs_epi8() { + const unsafe fn test_mm512_maskz_subs_epi8() { let a = _mm512_set1_epi8(-1); let b = _mm512_set1_epi8(i8::MAX); let r = _mm512_maskz_subs_epi8(0, a, b); @@ -12924,7 +13836,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_subs_epi8() { + const unsafe fn test_mm256_mask_subs_epi8() { let a = _mm256_set1_epi8(-1); let b = _mm256_set1_epi8(i8::MAX); let r = _mm256_mask_subs_epi8(a, 0, a, b); @@ -12937,7 +13849,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_subs_epi8() { + const unsafe fn test_mm256_maskz_subs_epi8() { let a = _mm256_set1_epi8(-1); let b = _mm256_set1_epi8(i8::MAX); let r = _mm256_maskz_subs_epi8(0, a, b); @@ -12950,7 +13862,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_subs_epi8() { + const unsafe fn test_mm_mask_subs_epi8() { let a = _mm_set1_epi8(-1); let b = _mm_set1_epi8(i8::MAX); let r = _mm_mask_subs_epi8(a, 0, a, b); @@ -12962,7 +13874,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_subs_epi8() { + const unsafe fn test_mm_maskz_subs_epi8() { let a = _mm_set1_epi8(-1); let b = _mm_set1_epi8(i8::MAX); let r = _mm_maskz_subs_epi8(0, a, b); @@ -12974,7 +13886,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mulhi_epu16() { + const unsafe fn test_mm512_mulhi_epu16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let r = _mm512_mulhi_epu16(a, b); @@ -12983,7 +13895,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn 
test_mm512_mask_mulhi_epu16() { + const unsafe fn test_mm512_mask_mulhi_epu16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let r = _mm512_mask_mulhi_epu16(a, 0, a, b); @@ -12996,7 +13908,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_mulhi_epu16() { + const unsafe fn test_mm512_maskz_mulhi_epu16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let r = _mm512_maskz_mulhi_epu16(0, a, b); @@ -13009,7 +13921,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_mulhi_epu16() { + const unsafe fn test_mm256_mask_mulhi_epu16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1); let r = _mm256_mask_mulhi_epu16(a, 0, a, b); @@ -13020,7 +13932,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_mulhi_epu16() { + const unsafe fn test_mm256_maskz_mulhi_epu16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1); let r = _mm256_maskz_mulhi_epu16(0, a, b); @@ -13031,7 +13943,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_mulhi_epu16() { + const unsafe fn test_mm_mask_mulhi_epu16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1); let r = _mm_mask_mulhi_epu16(a, 0, a, b); @@ -13042,7 +13954,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_mulhi_epu16() { + const unsafe fn test_mm_maskz_mulhi_epu16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1); let r = _mm_maskz_mulhi_epu16(0, a, b); @@ -13053,7 +13965,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mulhi_epi16() { + const unsafe fn test_mm512_mulhi_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let r = _mm512_mulhi_epi16(a, b); @@ -13062,7 +13974,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_mulhi_epi16() { + const unsafe fn test_mm512_mask_mulhi_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let r = _mm512_mask_mulhi_epi16(a, 0, a, b); @@ -13075,7 +13987,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_mulhi_epi16() { + const unsafe fn test_mm512_maskz_mulhi_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let r = _mm512_maskz_mulhi_epi16(0, a, b); @@ -13088,7 +14000,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_mulhi_epi16() { + const unsafe fn test_mm256_mask_mulhi_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1); let r = _mm256_mask_mulhi_epi16(a, 0, a, b); @@ -13099,7 +14011,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_mulhi_epi16() { + const unsafe fn test_mm256_maskz_mulhi_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1); let r = _mm256_maskz_mulhi_epi16(0, a, b); @@ -13110,7 +14022,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_mulhi_epi16() { + const unsafe fn test_mm_mask_mulhi_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1); let r = _mm_mask_mulhi_epi16(a, 0, a, b); @@ -13121,7 +14033,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_mulhi_epi16() { + const unsafe fn test_mm_maskz_mulhi_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1); let r = _mm_maskz_mulhi_epi16(0, a, b); @@ -13211,7 +14123,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mullo_epi16() { + const 
unsafe fn test_mm512_mullo_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let r = _mm512_mullo_epi16(a, b); @@ -13220,7 +14132,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_mullo_epi16() { + const unsafe fn test_mm512_mask_mullo_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let r = _mm512_mask_mullo_epi16(a, 0, a, b); @@ -13233,7 +14145,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_mullo_epi16() { + const unsafe fn test_mm512_maskz_mullo_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let r = _mm512_maskz_mullo_epi16(0, a, b); @@ -13246,7 +14158,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_mullo_epi16() { + const unsafe fn test_mm256_mask_mullo_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1); let r = _mm256_mask_mullo_epi16(a, 0, a, b); @@ -13257,7 +14169,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_mullo_epi16() { + const unsafe fn test_mm256_maskz_mullo_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1); let r = _mm256_maskz_mullo_epi16(0, a, b); @@ -13268,7 +14180,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_mullo_epi16() { + const unsafe fn test_mm_mask_mullo_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1); let r = _mm_mask_mullo_epi16(a, 0, a, b); @@ -13279,7 +14191,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_mullo_epi16() { + const unsafe fn test_mm_maskz_mullo_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1); let r = _mm_maskz_mullo_epi16(0, a, b); @@ -13290,7 +14202,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_max_epu16() { + const unsafe fn test_mm512_max_epu16() { #[rustfmt::skip] let a = _mm512_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -13305,7 +14217,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_max_epu16() { + const unsafe fn test_mm512_mask_max_epu16() { #[rustfmt::skip] let a = _mm512_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -13322,7 +14234,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_max_epu16() { + const unsafe fn test_mm512_maskz_max_epu16() { #[rustfmt::skip] let a = _mm512_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -13339,7 +14251,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_max_epu16() { + const unsafe fn test_mm256_mask_max_epu16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm256_set_epi16(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm256_mask_max_epu16(a, 0, a, b); @@ -13350,7 +14262,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_max_epu16() { + const unsafe fn test_mm256_maskz_max_epu16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm256_set_epi16(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm256_maskz_max_epu16(0, a, b); @@ -13361,7 +14273,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn 
test_mm_mask_max_epu16() { + const unsafe fn test_mm_mask_max_epu16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm_set_epi16(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm_mask_max_epu16(a, 0, a, b); @@ -13372,7 +14284,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_max_epu16() { + const unsafe fn test_mm_maskz_max_epu16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm_set_epi16(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm_maskz_max_epu16(0, a, b); @@ -13383,7 +14295,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_max_epu8() { + const unsafe fn test_mm512_max_epu8() { #[rustfmt::skip] let a = _mm512_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -13404,7 +14316,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_max_epu8() { + const unsafe fn test_mm512_mask_max_epu8() { #[rustfmt::skip] let a = _mm512_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -13432,7 +14344,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_max_epu8() { + const unsafe fn test_mm512_maskz_max_epu8() { #[rustfmt::skip] let a = _mm512_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -13459,7 +14371,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_max_epu8() { + const unsafe fn test_mm256_mask_max_epu8() { #[rustfmt::skip] let a = _mm256_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -13476,7 +14388,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_max_epu8() { + const unsafe fn test_mm256_maskz_max_epu8() { #[rustfmt::skip] let a = _mm256_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -13493,7 +14405,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_max_epu8() { + const unsafe fn test_mm_mask_max_epu8() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm_mask_max_epu8(a, 0, a, b); @@ -13504,7 +14416,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_max_epu8() { + const unsafe fn test_mm_maskz_max_epu8() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm_maskz_max_epu8(0, a, b); @@ -13515,7 +14427,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_max_epi16() { + const unsafe fn test_mm512_max_epi16() { #[rustfmt::skip] let a = _mm512_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -13530,7 +14442,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_max_epi16() { + const unsafe fn test_mm512_mask_max_epi16() { #[rustfmt::skip] let a = _mm512_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -13547,7 +14459,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_max_epi16() { + const unsafe fn test_mm512_maskz_max_epi16() { 
#[rustfmt::skip] let a = _mm512_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -13564,7 +14476,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_max_epi16() { + const unsafe fn test_mm256_mask_max_epi16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm256_set_epi16(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm256_mask_max_epi16(a, 0, a, b); @@ -13575,7 +14487,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_max_epi16() { + const unsafe fn test_mm256_maskz_max_epi16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm256_set_epi16(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm256_maskz_max_epi16(0, a, b); @@ -13586,7 +14498,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_max_epi16() { + const unsafe fn test_mm_mask_max_epi16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm_set_epi16(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm_mask_max_epi16(a, 0, a, b); @@ -13597,7 +14509,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_max_epi16() { + const unsafe fn test_mm_maskz_max_epi16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm_set_epi16(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm_maskz_max_epi16(0, a, b); @@ -13608,7 +14520,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_max_epi8() { + const unsafe fn test_mm512_max_epi8() { #[rustfmt::skip] let a = _mm512_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -13629,7 +14541,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_max_epi8() { + const unsafe fn test_mm512_mask_max_epi8() { #[rustfmt::skip] let a = _mm512_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -13657,7 +14569,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_max_epi8() { + const unsafe fn test_mm512_maskz_max_epi8() { #[rustfmt::skip] let a = _mm512_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -13684,7 +14596,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_max_epi8() { + const unsafe fn test_mm256_mask_max_epi8() { #[rustfmt::skip] let a = _mm256_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -13701,7 +14613,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_max_epi8() { + const unsafe fn test_mm256_maskz_max_epi8() { #[rustfmt::skip] let a = _mm256_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -13718,7 +14630,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_max_epi8() { + const unsafe fn test_mm_mask_max_epi8() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm_mask_max_epi8(a, 0, a, b); @@ -13729,7 +14641,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_max_epi8() { + const unsafe fn 
test_mm_maskz_max_epi8() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm_maskz_max_epi8(0, a, b); @@ -13740,7 +14652,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_min_epu16() { + const unsafe fn test_mm512_min_epu16() { #[rustfmt::skip] let a = _mm512_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -13755,7 +14667,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_min_epu16() { + const unsafe fn test_mm512_mask_min_epu16() { #[rustfmt::skip] let a = _mm512_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -13772,7 +14684,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_min_epu16() { + const unsafe fn test_mm512_maskz_min_epu16() { #[rustfmt::skip] let a = _mm512_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -13789,7 +14701,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_min_epu16() { + const unsafe fn test_mm256_mask_min_epu16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm256_set_epi16(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm256_mask_min_epu16(a, 0, a, b); @@ -13800,7 +14712,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_min_epu16() { + const unsafe fn test_mm256_maskz_min_epu16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm256_set_epi16(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm256_maskz_min_epu16(0, a, b); @@ -13811,7 +14723,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_min_epu16() { + const unsafe fn test_mm_mask_min_epu16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm_set_epi16(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm_mask_min_epu16(a, 0, a, b); @@ -13822,7 +14734,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_min_epu16() { + const unsafe fn test_mm_maskz_min_epu16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm_set_epi16(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm_maskz_min_epu16(0, a, b); @@ -13833,7 +14745,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_min_epu8() { + const unsafe fn test_mm512_min_epu8() { #[rustfmt::skip] let a = _mm512_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -13854,7 +14766,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_min_epu8() { + const unsafe fn test_mm512_mask_min_epu8() { #[rustfmt::skip] let a = _mm512_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -13882,7 +14794,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_min_epu8() { + const unsafe fn test_mm512_maskz_min_epu8() { #[rustfmt::skip] let a = _mm512_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -13909,7 +14821,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_min_epu8() { + const unsafe fn 
test_mm256_mask_min_epu8() { #[rustfmt::skip] let a = _mm256_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -13926,7 +14838,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_min_epu8() { + const unsafe fn test_mm256_maskz_min_epu8() { #[rustfmt::skip] let a = _mm256_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -13943,7 +14855,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_min_epu8() { + const unsafe fn test_mm_mask_min_epu8() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm_mask_min_epu8(a, 0, a, b); @@ -13954,7 +14866,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_min_epu8() { + const unsafe fn test_mm_maskz_min_epu8() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm_maskz_min_epu8(0, a, b); @@ -13965,7 +14877,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_min_epi16() { + const unsafe fn test_mm512_min_epi16() { #[rustfmt::skip] let a = _mm512_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -13980,7 +14892,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_min_epi16() { + const unsafe fn test_mm512_mask_min_epi16() { #[rustfmt::skip] let a = _mm512_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -13997,7 +14909,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_min_epi16() { + const unsafe fn test_mm512_maskz_min_epi16() { #[rustfmt::skip] let a = _mm512_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -14014,7 +14926,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_min_epi16() { + const unsafe fn test_mm256_mask_min_epi16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm256_set_epi16(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm256_mask_min_epi16(a, 0, a, b); @@ -14025,7 +14937,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_min_epi16() { + const unsafe fn test_mm256_maskz_min_epi16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm256_set_epi16(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm256_maskz_min_epi16(0, a, b); @@ -14036,7 +14948,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_min_epi16() { + const unsafe fn test_mm_mask_min_epi16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm_set_epi16(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm_mask_min_epi16(a, 0, a, b); @@ -14047,7 +14959,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_min_epi16() { + const unsafe fn test_mm_maskz_min_epi16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm_set_epi16(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm_maskz_min_epi16(0, a, b); @@ -14058,7 +14970,7 @@ mod tests { } #[simd_test(enable = 
"avx512bw")] - unsafe fn test_mm512_min_epi8() { + const unsafe fn test_mm512_min_epi8() { #[rustfmt::skip] let a = _mm512_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -14079,7 +14991,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_min_epi8() { + const unsafe fn test_mm512_mask_min_epi8() { #[rustfmt::skip] let a = _mm512_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -14107,7 +15019,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_min_epi8() { + const unsafe fn test_mm512_maskz_min_epi8() { #[rustfmt::skip] let a = _mm512_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -14134,7 +15046,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_min_epi8() { + const unsafe fn test_mm256_mask_min_epi8() { #[rustfmt::skip] let a = _mm256_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -14151,7 +15063,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_min_epi8() { + const unsafe fn test_mm256_maskz_min_epi8() { #[rustfmt::skip] let a = _mm256_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -14168,7 +15080,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_min_epi8() { + const unsafe fn test_mm_mask_min_epi8() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm_mask_min_epi8(a, 0, a, b); @@ -14179,7 +15091,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_min_epi8() { + const unsafe fn test_mm_maskz_min_epi8() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm_maskz_min_epi8(0, a, b); @@ -14190,7 +15102,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmplt_epu16_mask() { + const unsafe fn test_mm512_cmplt_epu16_mask() { let a = _mm512_set1_epi16(-2); let b = _mm512_set1_epi16(-1); let m = _mm512_cmplt_epu16_mask(a, b); @@ -14198,7 +15110,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmplt_epu16_mask() { + const unsafe fn test_mm512_mask_cmplt_epu16_mask() { let a = _mm512_set1_epi16(-2); let b = _mm512_set1_epi16(-1); let mask = 0b01010101_01010101_01010101_01010101; @@ -14207,7 +15119,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmplt_epu16_mask() { + const unsafe fn test_mm256_cmplt_epu16_mask() { let a = _mm256_set1_epi16(-2); let b = _mm256_set1_epi16(-1); let m = _mm256_cmplt_epu16_mask(a, b); @@ -14215,7 +15127,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmplt_epu16_mask() { + const unsafe fn test_mm256_mask_cmplt_epu16_mask() { let a = _mm256_set1_epi16(-2); let b = _mm256_set1_epi16(-1); let mask = 0b01010101_01010101; @@ -14224,7 +15136,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmplt_epu16_mask() { + const unsafe fn test_mm_cmplt_epu16_mask() { let a = _mm_set1_epi16(-2); let b = _mm_set1_epi16(-1); let m = 
_mm_cmplt_epu16_mask(a, b); @@ -14232,7 +15144,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmplt_epu16_mask() { + const unsafe fn test_mm_mask_cmplt_epu16_mask() { let a = _mm_set1_epi16(-2); let b = _mm_set1_epi16(-1); let mask = 0b01010101; @@ -14241,7 +15153,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmplt_epu8_mask() { + const unsafe fn test_mm512_cmplt_epu8_mask() { let a = _mm512_set1_epi8(-2); let b = _mm512_set1_epi8(-1); let m = _mm512_cmplt_epu8_mask(a, b); @@ -14252,7 +15164,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmplt_epu8_mask() { + const unsafe fn test_mm512_mask_cmplt_epu8_mask() { let a = _mm512_set1_epi8(-2); let b = _mm512_set1_epi8(-1); let mask = 0b01010101_01010101_01010101_01010101_01010101_01010101_01010101_01010101; @@ -14264,7 +15176,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmplt_epu8_mask() { + const unsafe fn test_mm256_cmplt_epu8_mask() { let a = _mm256_set1_epi8(-2); let b = _mm256_set1_epi8(-1); let m = _mm256_cmplt_epu8_mask(a, b); @@ -14272,7 +15184,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmplt_epu8_mask() { + const unsafe fn test_mm256_mask_cmplt_epu8_mask() { let a = _mm256_set1_epi8(-2); let b = _mm256_set1_epi8(-1); let mask = 0b01010101_01010101_01010101_01010101; @@ -14281,7 +15193,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmplt_epu8_mask() { + const unsafe fn test_mm_cmplt_epu8_mask() { let a = _mm_set1_epi8(-2); let b = _mm_set1_epi8(-1); let m = _mm_cmplt_epu8_mask(a, b); @@ -14289,7 +15201,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmplt_epu8_mask() { + const unsafe fn test_mm_mask_cmplt_epu8_mask() { let a = _mm_set1_epi8(-2); let b = _mm_set1_epi8(-1); let mask = 0b01010101_01010101; @@ -14298,7 +15210,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmplt_epi16_mask() { + const unsafe fn test_mm512_cmplt_epi16_mask() { let a = _mm512_set1_epi16(-2); let b = _mm512_set1_epi16(-1); let m = _mm512_cmplt_epi16_mask(a, b); @@ -14306,7 +15218,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmplt_epi16_mask() { + const unsafe fn test_mm512_mask_cmplt_epi16_mask() { let a = _mm512_set1_epi16(-2); let b = _mm512_set1_epi16(-1); let mask = 0b01010101_01010101_01010101_01010101; @@ -14315,7 +15227,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmplt_epi16_mask() { + const unsafe fn test_mm256_cmplt_epi16_mask() { let a = _mm256_set1_epi16(-2); let b = _mm256_set1_epi16(-1); let m = _mm256_cmplt_epi16_mask(a, b); @@ -14323,7 +15235,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmplt_epi16_mask() { + const unsafe fn test_mm256_mask_cmplt_epi16_mask() { let a = _mm256_set1_epi16(-2); let b = _mm256_set1_epi16(-1); let mask = 0b01010101_01010101; @@ -14332,7 +15244,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmplt_epi16_mask() { + const unsafe fn test_mm_cmplt_epi16_mask() { let a = _mm_set1_epi16(-2); let b = _mm_set1_epi16(-1); let m = _mm_cmplt_epi16_mask(a, b); @@ -14340,7 +15252,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmplt_epi16_mask() { + const unsafe fn test_mm_mask_cmplt_epi16_mask() { let a = _mm_set1_epi16(-2); let b = 
_mm_set1_epi16(-1); let mask = 0b01010101; @@ -14349,7 +15261,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmplt_epi8_mask() { + const unsafe fn test_mm512_cmplt_epi8_mask() { let a = _mm512_set1_epi8(-2); let b = _mm512_set1_epi8(-1); let m = _mm512_cmplt_epi8_mask(a, b); @@ -14360,7 +15272,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmplt_epi8_mask() { + const unsafe fn test_mm512_mask_cmplt_epi8_mask() { let a = _mm512_set1_epi8(-2); let b = _mm512_set1_epi8(-1); let mask = 0b01010101_01010101_01010101_01010101_01010101_01010101_01010101_01010101; @@ -14372,7 +15284,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmplt_epi8_mask() { + const unsafe fn test_mm256_cmplt_epi8_mask() { let a = _mm256_set1_epi8(-2); let b = _mm256_set1_epi8(-1); let m = _mm256_cmplt_epi8_mask(a, b); @@ -14380,7 +15292,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmplt_epi8_mask() { + const unsafe fn test_mm256_mask_cmplt_epi8_mask() { let a = _mm256_set1_epi8(-2); let b = _mm256_set1_epi8(-1); let mask = 0b01010101_01010101_01010101_01010101; @@ -14389,7 +15301,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmplt_epi8_mask() { + const unsafe fn test_mm_cmplt_epi8_mask() { let a = _mm_set1_epi8(-2); let b = _mm_set1_epi8(-1); let m = _mm_cmplt_epi8_mask(a, b); @@ -14397,7 +15309,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmplt_epi8_mask() { + const unsafe fn test_mm_mask_cmplt_epi8_mask() { let a = _mm_set1_epi8(-2); let b = _mm_set1_epi8(-1); let mask = 0b01010101_01010101; @@ -14406,7 +15318,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmpgt_epu16_mask() { + const unsafe fn test_mm512_cmpgt_epu16_mask() { let a = _mm512_set1_epi16(2); let b = _mm512_set1_epi16(1); let m = _mm512_cmpgt_epu16_mask(a, b); @@ -14414,7 +15326,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmpgt_epu16_mask() { + const unsafe fn test_mm512_mask_cmpgt_epu16_mask() { let a = _mm512_set1_epi16(2); let b = _mm512_set1_epi16(1); let mask = 0b01010101_01010101_01010101_01010101; @@ -14423,7 +15335,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmpgt_epu16_mask() { + const unsafe fn test_mm256_cmpgt_epu16_mask() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(1); let m = _mm256_cmpgt_epu16_mask(a, b); @@ -14431,7 +15343,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmpgt_epu16_mask() { + const unsafe fn test_mm256_mask_cmpgt_epu16_mask() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(1); let mask = 0b01010101_01010101; @@ -14440,7 +15352,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmpgt_epu16_mask() { + const unsafe fn test_mm_cmpgt_epu16_mask() { let a = _mm_set1_epi16(2); let b = _mm_set1_epi16(1); let m = _mm_cmpgt_epu16_mask(a, b); @@ -14448,7 +15360,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmpgt_epu16_mask() { + const unsafe fn test_mm_mask_cmpgt_epu16_mask() { let a = _mm_set1_epi16(2); let b = _mm_set1_epi16(1); let mask = 0b01010101; @@ -14457,7 +15369,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmpgt_epu8_mask() { + const unsafe fn test_mm512_cmpgt_epu8_mask() { let a = _mm512_set1_epi8(2); let b = _mm512_set1_epi8(1); let m = 
_mm512_cmpgt_epu8_mask(a, b); @@ -14468,7 +15380,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmpgt_epu8_mask() { + const unsafe fn test_mm512_mask_cmpgt_epu8_mask() { let a = _mm512_set1_epi8(2); let b = _mm512_set1_epi8(1); let mask = 0b01010101_01010101_01010101_01010101_01010101_01010101_01010101_01010101; @@ -14480,7 +15392,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmpgt_epu8_mask() { + const unsafe fn test_mm256_cmpgt_epu8_mask() { let a = _mm256_set1_epi8(2); let b = _mm256_set1_epi8(1); let m = _mm256_cmpgt_epu8_mask(a, b); @@ -14488,7 +15400,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmpgt_epu8_mask() { + const unsafe fn test_mm256_mask_cmpgt_epu8_mask() { let a = _mm256_set1_epi8(2); let b = _mm256_set1_epi8(1); let mask = 0b01010101_01010101_01010101_01010101; @@ -14497,7 +15409,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmpgt_epu8_mask() { + const unsafe fn test_mm_cmpgt_epu8_mask() { let a = _mm_set1_epi8(2); let b = _mm_set1_epi8(1); let m = _mm_cmpgt_epu8_mask(a, b); @@ -14505,7 +15417,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmpgt_epu8_mask() { + const unsafe fn test_mm_mask_cmpgt_epu8_mask() { let a = _mm_set1_epi8(2); let b = _mm_set1_epi8(1); let mask = 0b01010101_01010101; @@ -14514,7 +15426,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmpgt_epi16_mask() { + const unsafe fn test_mm512_cmpgt_epi16_mask() { let a = _mm512_set1_epi16(2); let b = _mm512_set1_epi16(-1); let m = _mm512_cmpgt_epi16_mask(a, b); @@ -14522,7 +15434,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmpgt_epi16_mask() { + const unsafe fn test_mm512_mask_cmpgt_epi16_mask() { let a = _mm512_set1_epi16(2); let b = _mm512_set1_epi16(-1); let mask = 0b01010101_01010101_01010101_01010101; @@ -14531,7 +15443,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmpgt_epi16_mask() { + const unsafe fn test_mm256_cmpgt_epi16_mask() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(-1); let m = _mm256_cmpgt_epi16_mask(a, b); @@ -14539,7 +15451,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmpgt_epi16_mask() { + const unsafe fn test_mm256_mask_cmpgt_epi16_mask() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(-1); let mask = 0b001010101_01010101; @@ -14548,7 +15460,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmpgt_epi16_mask() { + const unsafe fn test_mm_cmpgt_epi16_mask() { let a = _mm_set1_epi16(2); let b = _mm_set1_epi16(-1); let m = _mm_cmpgt_epi16_mask(a, b); @@ -14556,7 +15468,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmpgt_epi16_mask() { + const unsafe fn test_mm_mask_cmpgt_epi16_mask() { let a = _mm_set1_epi16(2); let b = _mm_set1_epi16(-1); let mask = 0b01010101; @@ -14565,7 +15477,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmpgt_epi8_mask() { + const unsafe fn test_mm512_cmpgt_epi8_mask() { let a = _mm512_set1_epi8(2); let b = _mm512_set1_epi8(-1); let m = _mm512_cmpgt_epi8_mask(a, b); @@ -14576,7 +15488,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmpgt_epi8_mask() { + const unsafe fn test_mm512_mask_cmpgt_epi8_mask() { let a = _mm512_set1_epi8(2); let b = _mm512_set1_epi8(-1); let mask = 
0b01010101_01010101_01010101_01010101_01010101_01010101_01010101_01010101; @@ -14588,7 +15500,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmpgt_epi8_mask() { + const unsafe fn test_mm256_cmpgt_epi8_mask() { let a = _mm256_set1_epi8(2); let b = _mm256_set1_epi8(-1); let m = _mm256_cmpgt_epi8_mask(a, b); @@ -14596,7 +15508,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmpgt_epi8_mask() { + const unsafe fn test_mm256_mask_cmpgt_epi8_mask() { let a = _mm256_set1_epi8(2); let b = _mm256_set1_epi8(-1); let mask = 0b01010101_01010101_01010101_01010101; @@ -14605,7 +15517,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmpgt_epi8_mask() { + const unsafe fn test_mm_cmpgt_epi8_mask() { let a = _mm_set1_epi8(2); let b = _mm_set1_epi8(-1); let m = _mm_cmpgt_epi8_mask(a, b); @@ -14613,7 +15525,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmpgt_epi8_mask() { + const unsafe fn test_mm_mask_cmpgt_epi8_mask() { let a = _mm_set1_epi8(2); let b = _mm_set1_epi8(-1); let mask = 0b01010101_01010101; @@ -14622,7 +15534,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmple_epu16_mask() { + const unsafe fn test_mm512_cmple_epu16_mask() { let a = _mm512_set1_epi16(-1); let b = _mm512_set1_epi16(-1); let m = _mm512_cmple_epu16_mask(a, b); @@ -14630,7 +15542,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmple_epu16_mask() { + const unsafe fn test_mm512_mask_cmple_epu16_mask() { let a = _mm512_set1_epi16(-1); let b = _mm512_set1_epi16(-1); let mask = 0b01010101_01010101_01010101_01010101; @@ -14639,7 +15551,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmple_epu16_mask() { + const unsafe fn test_mm256_cmple_epu16_mask() { let a = _mm256_set1_epi16(-1); let b = _mm256_set1_epi16(-1); let m = _mm256_cmple_epu16_mask(a, b); @@ -14647,7 +15559,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmple_epu16_mask() { + const unsafe fn test_mm256_mask_cmple_epu16_mask() { let a = _mm256_set1_epi16(-1); let b = _mm256_set1_epi16(-1); let mask = 0b01010101_01010101; @@ -14656,7 +15568,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmple_epu16_mask() { + const unsafe fn test_mm_cmple_epu16_mask() { let a = _mm_set1_epi16(-1); let b = _mm_set1_epi16(-1); let m = _mm_cmple_epu16_mask(a, b); @@ -14664,7 +15576,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmple_epu16_mask() { + const unsafe fn test_mm_mask_cmple_epu16_mask() { let a = _mm_set1_epi16(-1); let b = _mm_set1_epi16(-1); let mask = 0b01010101; @@ -14673,7 +15585,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmple_epu8_mask() { + const unsafe fn test_mm512_cmple_epu8_mask() { let a = _mm512_set1_epi8(-1); let b = _mm512_set1_epi8(-1); let m = _mm512_cmple_epu8_mask(a, b); @@ -14684,7 +15596,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmple_epu8_mask() { + const unsafe fn test_mm512_mask_cmple_epu8_mask() { let a = _mm512_set1_epi8(-1); let b = _mm512_set1_epi8(-1); let mask = 0b01010101_01010101_01010101_01010101_01010101_01010101_01010101_01010101; @@ -14696,7 +15608,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmple_epu8_mask() { + const unsafe fn test_mm256_cmple_epu8_mask() { let a = 
_mm256_set1_epi8(-1); let b = _mm256_set1_epi8(-1); let m = _mm256_cmple_epu8_mask(a, b); @@ -14704,7 +15616,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmple_epu8_mask() { + const unsafe fn test_mm256_mask_cmple_epu8_mask() { let a = _mm256_set1_epi8(-1); let b = _mm256_set1_epi8(-1); let mask = 0b01010101_01010101_01010101_01010101; @@ -14713,7 +15625,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmple_epu8_mask() { + const unsafe fn test_mm_cmple_epu8_mask() { let a = _mm_set1_epi8(-1); let b = _mm_set1_epi8(-1); let m = _mm_cmple_epu8_mask(a, b); @@ -14721,7 +15633,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmple_epu8_mask() { + const unsafe fn test_mm_mask_cmple_epu8_mask() { let a = _mm_set1_epi8(-1); let b = _mm_set1_epi8(-1); let mask = 0b01010101_01010101; @@ -14730,7 +15642,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmple_epi16_mask() { + const unsafe fn test_mm512_cmple_epi16_mask() { let a = _mm512_set1_epi16(-1); let b = _mm512_set1_epi16(-1); let m = _mm512_cmple_epi16_mask(a, b); @@ -14738,7 +15650,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmple_epi16_mask() { + const unsafe fn test_mm512_mask_cmple_epi16_mask() { let a = _mm512_set1_epi16(-1); let b = _mm512_set1_epi16(-1); let mask = 0b01010101_01010101_01010101_01010101; @@ -14747,7 +15659,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmple_epi16_mask() { + const unsafe fn test_mm256_cmple_epi16_mask() { let a = _mm256_set1_epi16(-1); let b = _mm256_set1_epi16(-1); let m = _mm256_cmple_epi16_mask(a, b); @@ -14755,7 +15667,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmple_epi16_mask() { + const unsafe fn test_mm256_mask_cmple_epi16_mask() { let a = _mm256_set1_epi16(-1); let b = _mm256_set1_epi16(-1); let mask = 0b01010101_01010101; @@ -14764,7 +15676,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmple_epi16_mask() { + const unsafe fn test_mm_cmple_epi16_mask() { let a = _mm_set1_epi16(-1); let b = _mm_set1_epi16(-1); let m = _mm_cmple_epi16_mask(a, b); @@ -14772,7 +15684,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmple_epi16_mask() { + const unsafe fn test_mm_mask_cmple_epi16_mask() { let a = _mm_set1_epi16(-1); let b = _mm_set1_epi16(-1); let mask = 0b01010101; @@ -14781,7 +15693,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmple_epi8_mask() { + const unsafe fn test_mm512_cmple_epi8_mask() { let a = _mm512_set1_epi8(-1); let b = _mm512_set1_epi8(-1); let m = _mm512_cmple_epi8_mask(a, b); @@ -14792,7 +15704,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmple_epi8_mask() { + const unsafe fn test_mm512_mask_cmple_epi8_mask() { let a = _mm512_set1_epi8(-1); let b = _mm512_set1_epi8(-1); let mask = 0b01010101_01010101_01010101_01010101_01010101_01010101_01010101_01010101; @@ -14804,7 +15716,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmple_epi8_mask() { + const unsafe fn test_mm256_cmple_epi8_mask() { let a = _mm256_set1_epi8(-1); let b = _mm256_set1_epi8(-1); let m = _mm256_cmple_epi8_mask(a, b); @@ -14812,7 +15724,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmple_epi8_mask() { + const unsafe fn 
test_mm256_mask_cmple_epi8_mask() { let a = _mm256_set1_epi8(-1); let b = _mm256_set1_epi8(-1); let mask = 0b01010101_01010101_01010101_01010101; @@ -14821,7 +15733,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmple_epi8_mask() { + const unsafe fn test_mm_cmple_epi8_mask() { let a = _mm_set1_epi8(-1); let b = _mm_set1_epi8(-1); let m = _mm_cmple_epi8_mask(a, b); @@ -14829,7 +15741,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmple_epi8_mask() { + const unsafe fn test_mm_mask_cmple_epi8_mask() { let a = _mm_set1_epi8(-1); let b = _mm_set1_epi8(-1); let mask = 0b01010101_01010101; @@ -14838,7 +15750,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmpge_epu16_mask() { + const unsafe fn test_mm512_cmpge_epu16_mask() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let m = _mm512_cmpge_epu16_mask(a, b); @@ -14846,7 +15758,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmpge_epu16_mask() { + const unsafe fn test_mm512_mask_cmpge_epu16_mask() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let mask = 0b01010101_01010101_01010101_01010101; @@ -14855,7 +15767,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmpge_epu16_mask() { + const unsafe fn test_mm256_cmpge_epu16_mask() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1); let m = _mm256_cmpge_epu16_mask(a, b); @@ -14863,7 +15775,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmpge_epu16_mask() { + const unsafe fn test_mm256_mask_cmpge_epu16_mask() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1); let mask = 0b01010101_01010101; @@ -14872,7 +15784,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmpge_epu16_mask() { + const unsafe fn test_mm_cmpge_epu16_mask() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1); let m = _mm_cmpge_epu16_mask(a, b); @@ -14880,7 +15792,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmpge_epu16_mask() { + const unsafe fn test_mm_mask_cmpge_epu16_mask() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1); let mask = 0b01010101; @@ -14889,7 +15801,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmpge_epu8_mask() { + const unsafe fn test_mm512_cmpge_epu8_mask() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(1); let m = _mm512_cmpge_epu8_mask(a, b); @@ -14900,7 +15812,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmpge_epu8_mask() { + const unsafe fn test_mm512_mask_cmpge_epu8_mask() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(1); let mask = 0b01010101_01010101_01010101_01010101_01010101_01010101_01010101_01010101; @@ -14912,7 +15824,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmpge_epu8_mask() { + const unsafe fn test_mm256_cmpge_epu8_mask() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(1); let m = _mm256_cmpge_epu8_mask(a, b); @@ -14920,7 +15832,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmpge_epu8_mask() { + const unsafe fn test_mm256_mask_cmpge_epu8_mask() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(1); let mask = 0b01010101_01010101_01010101_01010101; @@ -14929,7 +15841,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmpge_epu8_mask() { + const 
unsafe fn test_mm_cmpge_epu8_mask() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(1); let m = _mm_cmpge_epu8_mask(a, b); @@ -14937,7 +15849,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmpge_epu8_mask() { + const unsafe fn test_mm_mask_cmpge_epu8_mask() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(1); let mask = 0b01010101_01010101; @@ -14946,7 +15858,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmpge_epi16_mask() { + const unsafe fn test_mm512_cmpge_epi16_mask() { let a = _mm512_set1_epi16(-1); let b = _mm512_set1_epi16(-1); let m = _mm512_cmpge_epi16_mask(a, b); @@ -14954,7 +15866,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmpge_epi16_mask() { + const unsafe fn test_mm512_mask_cmpge_epi16_mask() { let a = _mm512_set1_epi16(-1); let b = _mm512_set1_epi16(-1); let mask = 0b01010101_01010101_01010101_01010101; @@ -14963,7 +15875,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmpge_epi16_mask() { + const unsafe fn test_mm256_cmpge_epi16_mask() { let a = _mm256_set1_epi16(-1); let b = _mm256_set1_epi16(-1); let m = _mm256_cmpge_epi16_mask(a, b); @@ -14971,7 +15883,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmpge_epi16_mask() { + const unsafe fn test_mm256_mask_cmpge_epi16_mask() { let a = _mm256_set1_epi16(-1); let b = _mm256_set1_epi16(-1); let mask = 0b01010101_01010101; @@ -14980,7 +15892,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmpge_epi16_mask() { + const unsafe fn test_mm_cmpge_epi16_mask() { let a = _mm_set1_epi16(-1); let b = _mm_set1_epi16(-1); let m = _mm_cmpge_epi16_mask(a, b); @@ -14988,7 +15900,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmpge_epi16_mask() { + const unsafe fn test_mm_mask_cmpge_epi16_mask() { let a = _mm_set1_epi16(-1); let b = _mm_set1_epi16(-1); let mask = 0b01010101; @@ -14997,7 +15909,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmpge_epi8_mask() { + const unsafe fn test_mm512_cmpge_epi8_mask() { let a = _mm512_set1_epi8(-1); let b = _mm512_set1_epi8(-1); let m = _mm512_cmpge_epi8_mask(a, b); @@ -15008,7 +15920,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmpge_epi8_mask() { + const unsafe fn test_mm512_mask_cmpge_epi8_mask() { let a = _mm512_set1_epi8(-1); let b = _mm512_set1_epi8(-1); let mask = 0b01010101_01010101_01010101_01010101_01010101_01010101_01010101_01010101; @@ -15020,7 +15932,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmpge_epi8_mask() { + const unsafe fn test_mm256_cmpge_epi8_mask() { let a = _mm256_set1_epi8(-1); let b = _mm256_set1_epi8(-1); let m = _mm256_cmpge_epi8_mask(a, b); @@ -15028,7 +15940,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmpge_epi8_mask() { + const unsafe fn test_mm256_mask_cmpge_epi8_mask() { let a = _mm256_set1_epi8(-1); let b = _mm256_set1_epi8(-1); let mask = 0b01010101_01010101_01010101_01010101; @@ -15037,7 +15949,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmpge_epi8_mask() { + const unsafe fn test_mm_cmpge_epi8_mask() { let a = _mm_set1_epi8(-1); let b = _mm_set1_epi8(-1); let m = _mm_cmpge_epi8_mask(a, b); @@ -15045,7 +15957,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmpge_epi8_mask() { + const 
unsafe fn test_mm_mask_cmpge_epi8_mask() { let a = _mm_set1_epi8(-1); let b = _mm_set1_epi8(-1); let mask = 0b01010101_01010101; @@ -15054,7 +15966,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmpeq_epu16_mask() { + const unsafe fn test_mm512_cmpeq_epu16_mask() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let m = _mm512_cmpeq_epu16_mask(a, b); @@ -15062,7 +15974,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmpeq_epu16_mask() { + const unsafe fn test_mm512_mask_cmpeq_epu16_mask() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let mask = 0b01010101_01010101_01010101_01010101; @@ -15071,7 +15983,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmpeq_epu16_mask() { + const unsafe fn test_mm256_cmpeq_epu16_mask() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1); let m = _mm256_cmpeq_epu16_mask(a, b); @@ -15079,7 +15991,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmpeq_epu16_mask() { + const unsafe fn test_mm256_mask_cmpeq_epu16_mask() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1); let mask = 0b01010101_01010101; @@ -15088,7 +16000,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmpeq_epu16_mask() { + const unsafe fn test_mm_cmpeq_epu16_mask() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1); let m = _mm_cmpeq_epu16_mask(a, b); @@ -15096,7 +16008,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmpeq_epu16_mask() { + const unsafe fn test_mm_mask_cmpeq_epu16_mask() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1); let mask = 0b01010101; @@ -15105,7 +16017,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmpeq_epu8_mask() { + const unsafe fn test_mm512_cmpeq_epu8_mask() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(1); let m = _mm512_cmpeq_epu8_mask(a, b); @@ -15116,7 +16028,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmpeq_epu8_mask() { + const unsafe fn test_mm512_mask_cmpeq_epu8_mask() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(1); let mask = 0b01010101_01010101_01010101_01010101_01010101_01010101_01010101_01010101; @@ -15128,7 +16040,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmpeq_epu8_mask() { + const unsafe fn test_mm256_cmpeq_epu8_mask() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(1); let m = _mm256_cmpeq_epu8_mask(a, b); @@ -15136,7 +16048,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmpeq_epu8_mask() { + const unsafe fn test_mm256_mask_cmpeq_epu8_mask() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(1); let mask = 0b01010101_01010101_01010101_01010101; @@ -15145,7 +16057,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmpeq_epu8_mask() { + const unsafe fn test_mm_cmpeq_epu8_mask() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(1); let m = _mm_cmpeq_epu8_mask(a, b); @@ -15153,7 +16065,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmpeq_epu8_mask() { + const unsafe fn test_mm_mask_cmpeq_epu8_mask() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(1); let mask = 0b01010101_01010101; @@ -15162,7 +16074,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmpeq_epi16_mask() { + const unsafe fn 
test_mm512_cmpeq_epi16_mask() { let a = _mm512_set1_epi16(-1); let b = _mm512_set1_epi16(-1); let m = _mm512_cmpeq_epi16_mask(a, b); @@ -15170,7 +16082,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmpeq_epi16_mask() { + const unsafe fn test_mm512_mask_cmpeq_epi16_mask() { let a = _mm512_set1_epi16(-1); let b = _mm512_set1_epi16(-1); let mask = 0b01010101_01010101_01010101_01010101; @@ -15179,7 +16091,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmpeq_epi16_mask() { + const unsafe fn test_mm256_cmpeq_epi16_mask() { let a = _mm256_set1_epi16(-1); let b = _mm256_set1_epi16(-1); let m = _mm256_cmpeq_epi16_mask(a, b); @@ -15187,7 +16099,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmpeq_epi16_mask() { + const unsafe fn test_mm256_mask_cmpeq_epi16_mask() { let a = _mm256_set1_epi16(-1); let b = _mm256_set1_epi16(-1); let mask = 0b01010101_01010101; @@ -15196,7 +16108,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmpeq_epi16_mask() { + const unsafe fn test_mm_cmpeq_epi16_mask() { let a = _mm_set1_epi16(-1); let b = _mm_set1_epi16(-1); let m = _mm_cmpeq_epi16_mask(a, b); @@ -15204,7 +16116,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmpeq_epi16_mask() { + const unsafe fn test_mm_mask_cmpeq_epi16_mask() { let a = _mm_set1_epi16(-1); let b = _mm_set1_epi16(-1); let mask = 0b01010101; @@ -15213,7 +16125,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmpeq_epi8_mask() { + const unsafe fn test_mm512_cmpeq_epi8_mask() { let a = _mm512_set1_epi8(-1); let b = _mm512_set1_epi8(-1); let m = _mm512_cmpeq_epi8_mask(a, b); @@ -15224,7 +16136,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmpeq_epi8_mask() { + const unsafe fn test_mm512_mask_cmpeq_epi8_mask() { let a = _mm512_set1_epi8(-1); let b = _mm512_set1_epi8(-1); let mask = 0b01010101_01010101_01010101_01010101_01010101_01010101_01010101_01010101; @@ -15236,7 +16148,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmpeq_epi8_mask() { + const unsafe fn test_mm256_cmpeq_epi8_mask() { let a = _mm256_set1_epi8(-1); let b = _mm256_set1_epi8(-1); let m = _mm256_cmpeq_epi8_mask(a, b); @@ -15244,7 +16156,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmpeq_epi8_mask() { + const unsafe fn test_mm256_mask_cmpeq_epi8_mask() { let a = _mm256_set1_epi8(-1); let b = _mm256_set1_epi8(-1); let mask = 0b01010101_01010101_01010101_01010101; @@ -15253,7 +16165,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmpeq_epi8_mask() { + const unsafe fn test_mm_cmpeq_epi8_mask() { let a = _mm_set1_epi8(-1); let b = _mm_set1_epi8(-1); let m = _mm_cmpeq_epi8_mask(a, b); @@ -15261,7 +16173,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmpeq_epi8_mask() { + const unsafe fn test_mm_mask_cmpeq_epi8_mask() { let a = _mm_set1_epi8(-1); let b = _mm_set1_epi8(-1); let mask = 0b01010101_01010101; @@ -15270,7 +16182,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmpneq_epu16_mask() { + const unsafe fn test_mm512_cmpneq_epu16_mask() { let a = _mm512_set1_epi16(2); let b = _mm512_set1_epi16(1); let m = _mm512_cmpneq_epu16_mask(a, b); @@ -15278,7 +16190,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmpneq_epu16_mask() { + 
const unsafe fn test_mm512_mask_cmpneq_epu16_mask() { let a = _mm512_set1_epi16(2); let b = _mm512_set1_epi16(1); let mask = 0b01010101_01010101_01010101_01010101; @@ -15287,7 +16199,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmpneq_epu16_mask() { + const unsafe fn test_mm256_cmpneq_epu16_mask() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(1); let m = _mm256_cmpneq_epu16_mask(a, b); @@ -15295,7 +16207,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmpneq_epu16_mask() { + const unsafe fn test_mm256_mask_cmpneq_epu16_mask() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(1); let mask = 0b01010101_01010101; @@ -15304,7 +16216,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmpneq_epu16_mask() { + const unsafe fn test_mm_cmpneq_epu16_mask() { let a = _mm_set1_epi16(2); let b = _mm_set1_epi16(1); let m = _mm_cmpneq_epu16_mask(a, b); @@ -15312,7 +16224,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmpneq_epu16_mask() { + const unsafe fn test_mm_mask_cmpneq_epu16_mask() { let a = _mm_set1_epi16(2); let b = _mm_set1_epi16(1); let mask = 0b01010101; @@ -15321,7 +16233,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmpneq_epu8_mask() { + const unsafe fn test_mm512_cmpneq_epu8_mask() { let a = _mm512_set1_epi8(2); let b = _mm512_set1_epi8(1); let m = _mm512_cmpneq_epu8_mask(a, b); @@ -15332,7 +16244,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmpneq_epu8_mask() { + const unsafe fn test_mm512_mask_cmpneq_epu8_mask() { let a = _mm512_set1_epi8(2); let b = _mm512_set1_epi8(1); let mask = 0b01010101_01010101_01010101_01010101_01010101_01010101_01010101_01010101; @@ -15344,7 +16256,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmpneq_epu8_mask() { + const unsafe fn test_mm256_cmpneq_epu8_mask() { let a = _mm256_set1_epi8(2); let b = _mm256_set1_epi8(1); let m = _mm256_cmpneq_epu8_mask(a, b); @@ -15352,7 +16264,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmpneq_epu8_mask() { + const unsafe fn test_mm256_mask_cmpneq_epu8_mask() { let a = _mm256_set1_epi8(2); let b = _mm256_set1_epi8(1); let mask = 0b01010101_01010101_01010101_01010101; @@ -15361,7 +16273,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmpneq_epu8_mask() { + const unsafe fn test_mm_cmpneq_epu8_mask() { let a = _mm_set1_epi8(2); let b = _mm_set1_epi8(1); let m = _mm_cmpneq_epu8_mask(a, b); @@ -15369,7 +16281,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmpneq_epu8_mask() { + const unsafe fn test_mm_mask_cmpneq_epu8_mask() { let a = _mm_set1_epi8(2); let b = _mm_set1_epi8(1); let mask = 0b01010101_01010101; @@ -15378,7 +16290,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmpneq_epi16_mask() { + const unsafe fn test_mm512_cmpneq_epi16_mask() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(-1); let m = _mm512_cmpneq_epi16_mask(a, b); @@ -15386,7 +16298,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmpneq_epi16_mask() { + const unsafe fn test_mm512_mask_cmpneq_epi16_mask() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(-1); let mask = 0b01010101_01010101_01010101_01010101; @@ -15395,7 +16307,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - 
unsafe fn test_mm256_cmpneq_epi16_mask() { + const unsafe fn test_mm256_cmpneq_epi16_mask() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(-1); let m = _mm256_cmpneq_epi16_mask(a, b); @@ -15403,7 +16315,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmpneq_epi16_mask() { + const unsafe fn test_mm256_mask_cmpneq_epi16_mask() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(-1); let mask = 0b01010101_01010101; @@ -15412,7 +16324,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmpneq_epi16_mask() { + const unsafe fn test_mm_cmpneq_epi16_mask() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(-1); let m = _mm_cmpneq_epi16_mask(a, b); @@ -15420,7 +16332,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmpneq_epi16_mask() { + const unsafe fn test_mm_mask_cmpneq_epi16_mask() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(-1); let mask = 0b01010101; @@ -15429,7 +16341,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmpneq_epi8_mask() { + const unsafe fn test_mm512_cmpneq_epi8_mask() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(-1); let m = _mm512_cmpneq_epi8_mask(a, b); @@ -15440,7 +16352,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmpneq_epi8_mask() { + const unsafe fn test_mm512_mask_cmpneq_epi8_mask() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(-1); let mask = 0b01010101_01010101_01010101_01010101_01010101_01010101_01010101_01010101; @@ -15452,7 +16364,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmpneq_epi8_mask() { + const unsafe fn test_mm256_cmpneq_epi8_mask() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(-1); let m = _mm256_cmpneq_epi8_mask(a, b); @@ -15460,7 +16372,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmpneq_epi8_mask() { + const unsafe fn test_mm256_mask_cmpneq_epi8_mask() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(-1); let mask = 0b01010101_01010101_01010101_01010101; @@ -15469,7 +16381,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmpneq_epi8_mask() { + const unsafe fn test_mm_cmpneq_epi8_mask() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(-1); let m = _mm_cmpneq_epi8_mask(a, b); @@ -15477,7 +16389,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmpneq_epi8_mask() { + const unsafe fn test_mm_mask_cmpneq_epi8_mask() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(-1); let mask = 0b01010101_01010101; @@ -15486,7 +16398,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmp_epu16_mask() { + const unsafe fn test_mm512_cmp_epu16_mask() { let a = _mm512_set1_epi16(0); let b = _mm512_set1_epi16(1); let m = _mm512_cmp_epu16_mask::<_MM_CMPINT_LT>(a, b); @@ -15494,7 +16406,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmp_epu16_mask() { + const unsafe fn test_mm512_mask_cmp_epu16_mask() { let a = _mm512_set1_epi16(0); let b = _mm512_set1_epi16(1); let mask = 0b01010101_01010101_01010101_01010101; @@ -15503,7 +16415,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmp_epu16_mask() { + const unsafe fn test_mm256_cmp_epu16_mask() { let a = _mm256_set1_epi16(0); let b = _mm256_set1_epi16(1); let m = _mm256_cmp_epu16_mask::<_MM_CMPINT_LT>(a, b); @@ -15511,7 +16423,7 @@ mod tests { } 
#[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmp_epu16_mask() { + const unsafe fn test_mm256_mask_cmp_epu16_mask() { let a = _mm256_set1_epi16(0); let b = _mm256_set1_epi16(1); let mask = 0b01010101_01010101; @@ -15520,7 +16432,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmp_epu16_mask() { + const unsafe fn test_mm_cmp_epu16_mask() { let a = _mm_set1_epi16(0); let b = _mm_set1_epi16(1); let m = _mm_cmp_epu16_mask::<_MM_CMPINT_LT>(a, b); @@ -15528,7 +16440,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmp_epu16_mask() { + const unsafe fn test_mm_mask_cmp_epu16_mask() { let a = _mm_set1_epi16(0); let b = _mm_set1_epi16(1); let mask = 0b01010101; @@ -15537,7 +16449,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmp_epu8_mask() { + const unsafe fn test_mm512_cmp_epu8_mask() { let a = _mm512_set1_epi8(0); let b = _mm512_set1_epi8(1); let m = _mm512_cmp_epu8_mask::<_MM_CMPINT_LT>(a, b); @@ -15548,7 +16460,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmp_epu8_mask() { + const unsafe fn test_mm512_mask_cmp_epu8_mask() { let a = _mm512_set1_epi8(0); let b = _mm512_set1_epi8(1); let mask = 0b01010101_01010101_01010101_01010101_01010101_01010101_01010101_01010101; @@ -15560,7 +16472,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmp_epu8_mask() { + const unsafe fn test_mm256_cmp_epu8_mask() { let a = _mm256_set1_epi8(0); let b = _mm256_set1_epi8(1); let m = _mm256_cmp_epu8_mask::<_MM_CMPINT_LT>(a, b); @@ -15568,7 +16480,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmp_epu8_mask() { + const unsafe fn test_mm256_mask_cmp_epu8_mask() { let a = _mm256_set1_epi8(0); let b = _mm256_set1_epi8(1); let mask = 0b01010101_01010101_01010101_01010101; @@ -15577,7 +16489,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmp_epu8_mask() { + const unsafe fn test_mm_cmp_epu8_mask() { let a = _mm_set1_epi8(0); let b = _mm_set1_epi8(1); let m = _mm_cmp_epu8_mask::<_MM_CMPINT_LT>(a, b); @@ -15585,7 +16497,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmp_epu8_mask() { + const unsafe fn test_mm_mask_cmp_epu8_mask() { let a = _mm_set1_epi8(0); let b = _mm_set1_epi8(1); let mask = 0b01010101_01010101; @@ -15594,7 +16506,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmp_epi16_mask() { + const unsafe fn test_mm512_cmp_epi16_mask() { let a = _mm512_set1_epi16(0); let b = _mm512_set1_epi16(1); let m = _mm512_cmp_epi16_mask::<_MM_CMPINT_LT>(a, b); @@ -15602,7 +16514,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmp_epi16_mask() { + const unsafe fn test_mm512_mask_cmp_epi16_mask() { let a = _mm512_set1_epi16(0); let b = _mm512_set1_epi16(1); let mask = 0b01010101_01010101_01010101_01010101; @@ -15611,7 +16523,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmp_epi16_mask() { + const unsafe fn test_mm256_cmp_epi16_mask() { let a = _mm256_set1_epi16(0); let b = _mm256_set1_epi16(1); let m = _mm256_cmp_epi16_mask::<_MM_CMPINT_LT>(a, b); @@ -15619,7 +16531,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmp_epi16_mask() { + const unsafe fn test_mm256_mask_cmp_epi16_mask() { let a = _mm256_set1_epi16(0); let b = _mm256_set1_epi16(1); let mask = 0b01010101_01010101; @@ 
-15628,7 +16540,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmp_epi16_mask() { + const unsafe fn test_mm_cmp_epi16_mask() { let a = _mm_set1_epi16(0); let b = _mm_set1_epi16(1); let m = _mm_cmp_epi16_mask::<_MM_CMPINT_LT>(a, b); @@ -15636,7 +16548,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmp_epi16_mask() { + const unsafe fn test_mm_mask_cmp_epi16_mask() { let a = _mm_set1_epi16(0); let b = _mm_set1_epi16(1); let mask = 0b01010101; @@ -15645,7 +16557,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cmp_epi8_mask() { + const unsafe fn test_mm512_cmp_epi8_mask() { let a = _mm512_set1_epi8(0); let b = _mm512_set1_epi8(1); let m = _mm512_cmp_epi8_mask::<_MM_CMPINT_LT>(a, b); @@ -15656,7 +16568,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cmp_epi8_mask() { + const unsafe fn test_mm512_mask_cmp_epi8_mask() { let a = _mm512_set1_epi8(0); let b = _mm512_set1_epi8(1); let mask = 0b01010101_01010101_01010101_01010101_01010101_01010101_01010101_01010101; @@ -15668,7 +16580,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cmp_epi8_mask() { + const unsafe fn test_mm256_cmp_epi8_mask() { let a = _mm256_set1_epi8(0); let b = _mm256_set1_epi8(1); let m = _mm256_cmp_epi8_mask::<_MM_CMPINT_LT>(a, b); @@ -15676,7 +16588,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cmp_epi8_mask() { + const unsafe fn test_mm256_mask_cmp_epi8_mask() { let a = _mm256_set1_epi8(0); let b = _mm256_set1_epi8(1); let mask = 0b01010101_01010101_01010101_01010101; @@ -15685,7 +16597,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cmp_epi8_mask() { + const unsafe fn test_mm_cmp_epi8_mask() { let a = _mm_set1_epi8(0); let b = _mm_set1_epi8(1); let m = _mm_cmp_epi8_mask::<_MM_CMPINT_LT>(a, b); @@ -15693,7 +16605,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cmp_epi8_mask() { + const unsafe fn test_mm_mask_cmp_epi8_mask() { let a = _mm_set1_epi8(0); let b = _mm_set1_epi8(1); let mask = 0b01010101_01010101; @@ -15702,91 +16614,91 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_reduce_add_epi16() { + const unsafe fn test_mm256_reduce_add_epi16() { let a = _mm256_set1_epi16(1); let e = _mm256_reduce_add_epi16(a); assert_eq!(16, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_reduce_add_epi16() { + const unsafe fn test_mm256_mask_reduce_add_epi16() { let a = _mm256_set1_epi16(1); let e = _mm256_mask_reduce_add_epi16(0b11111111_00000000, a); assert_eq!(8, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_reduce_add_epi16() { + const unsafe fn test_mm_reduce_add_epi16() { let a = _mm_set1_epi16(1); let e = _mm_reduce_add_epi16(a); assert_eq!(8, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_reduce_add_epi16() { + const unsafe fn test_mm_mask_reduce_add_epi16() { let a = _mm_set1_epi16(1); let e = _mm_mask_reduce_add_epi16(0b11110000, a); assert_eq!(4, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_reduce_add_epi8() { + const unsafe fn test_mm256_reduce_add_epi8() { let a = _mm256_set1_epi8(1); let e = _mm256_reduce_add_epi8(a); assert_eq!(32, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_reduce_add_epi8() { + const unsafe fn test_mm256_mask_reduce_add_epi8() { let a = 
_mm256_set1_epi8(1); let e = _mm256_mask_reduce_add_epi8(0b11111111_00000000_11111111_00000000, a); assert_eq!(16, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_reduce_add_epi8() { + const unsafe fn test_mm_reduce_add_epi8() { let a = _mm_set1_epi8(1); let e = _mm_reduce_add_epi8(a); assert_eq!(16, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_reduce_add_epi8() { + const unsafe fn test_mm_mask_reduce_add_epi8() { let a = _mm_set1_epi8(1); let e = _mm_mask_reduce_add_epi8(0b11111111_00000000, a); assert_eq!(8, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_reduce_and_epi16() { + const unsafe fn test_mm256_reduce_and_epi16() { let a = _mm256_set_epi16(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2); let e = _mm256_reduce_and_epi16(a); assert_eq!(0, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_reduce_and_epi16() { + const unsafe fn test_mm256_mask_reduce_and_epi16() { let a = _mm256_set_epi16(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2); let e = _mm256_mask_reduce_and_epi16(0b11111111_00000000, a); assert_eq!(1, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_reduce_and_epi16() { + const unsafe fn test_mm_reduce_and_epi16() { let a = _mm_set_epi16(1, 1, 1, 1, 2, 2, 2, 2); let e = _mm_reduce_and_epi16(a); assert_eq!(0, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_reduce_and_epi16() { + const unsafe fn test_mm_mask_reduce_and_epi16() { let a = _mm_set_epi16(1, 1, 1, 1, 2, 2, 2, 2); let e = _mm_mask_reduce_and_epi16(0b11110000, a); assert_eq!(1, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_reduce_and_epi8() { + const unsafe fn test_mm256_reduce_and_epi8() { let a = _mm256_set_epi8( 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, @@ -15796,7 +16708,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_reduce_and_epi8() { + const unsafe fn test_mm256_mask_reduce_and_epi8() { let a = _mm256_set_epi8( 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, @@ -15806,49 +16718,49 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_reduce_and_epi8() { + const unsafe fn test_mm_reduce_and_epi8() { let a = _mm_set_epi8(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2); let e = _mm_reduce_and_epi8(a); assert_eq!(0, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_reduce_and_epi8() { + const unsafe fn test_mm_mask_reduce_and_epi8() { let a = _mm_set_epi8(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2); let e = _mm_mask_reduce_and_epi8(0b11111111_00000000, a); assert_eq!(1, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_reduce_mul_epi16() { + const unsafe fn test_mm256_reduce_mul_epi16() { let a = _mm256_set_epi16(2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1); let e = _mm256_reduce_mul_epi16(a); assert_eq!(256, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_reduce_mul_epi16() { + const unsafe fn test_mm256_mask_reduce_mul_epi16() { let a = _mm256_set_epi16(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2); let e = _mm256_mask_reduce_mul_epi16(0b11111111_00000000, a); assert_eq!(1, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_reduce_mul_epi16() { + const unsafe fn test_mm_reduce_mul_epi16() { let a = _mm_set_epi16(2, 2, 2, 2, 1, 1, 1, 1); let e = _mm_reduce_mul_epi16(a); 
assert_eq!(16, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_reduce_mul_epi16() { + const unsafe fn test_mm_mask_reduce_mul_epi16() { let a = _mm_set_epi16(1, 1, 1, 1, 2, 2, 2, 2); let e = _mm_mask_reduce_mul_epi16(0b11110000, a); assert_eq!(1, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_reduce_mul_epi8() { + const unsafe fn test_mm256_reduce_mul_epi8() { let a = _mm256_set_epi8( 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, @@ -15858,7 +16770,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_reduce_mul_epi8() { + const unsafe fn test_mm256_mask_reduce_mul_epi8() { let a = _mm256_set_epi8( 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, @@ -15868,49 +16780,49 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_reduce_mul_epi8() { + const unsafe fn test_mm_reduce_mul_epi8() { let a = _mm_set_epi8(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2); let e = _mm_reduce_mul_epi8(a); assert_eq!(8, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_reduce_mul_epi8() { + const unsafe fn test_mm_mask_reduce_mul_epi8() { let a = _mm_set_epi8(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2); let e = _mm_mask_reduce_mul_epi8(0b11111111_00000000, a); assert_eq!(1, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_reduce_max_epi16() { + const unsafe fn test_mm256_reduce_max_epi16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: i16 = _mm256_reduce_max_epi16(a); assert_eq!(15, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_reduce_max_epi16() { + const unsafe fn test_mm256_mask_reduce_max_epi16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: i16 = _mm256_mask_reduce_max_epi16(0b11111111_00000000, a); assert_eq!(7, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_reduce_max_epi16() { + const unsafe fn test_mm_reduce_max_epi16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let e: i16 = _mm_reduce_max_epi16(a); assert_eq!(7, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_reduce_max_epi16() { + const unsafe fn test_mm_mask_reduce_max_epi16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let e: i16 = _mm_mask_reduce_max_epi16(0b11110000, a); assert_eq!(3, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_reduce_max_epi8() { + const unsafe fn test_mm256_reduce_max_epi8() { let a = _mm256_set_epi8( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -15920,7 +16832,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_reduce_max_epi8() { + const unsafe fn test_mm256_mask_reduce_max_epi8() { let a = _mm256_set_epi8( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -15930,49 +16842,49 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_reduce_max_epi8() { + const unsafe fn test_mm_reduce_max_epi8() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: i8 = _mm_reduce_max_epi8(a); assert_eq!(15, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_reduce_max_epi8() { + const unsafe fn test_mm_mask_reduce_max_epi8() { 
let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: i8 = _mm_mask_reduce_max_epi8(0b11111111_00000000, a); assert_eq!(7, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_reduce_max_epu16() { + const unsafe fn test_mm256_reduce_max_epu16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: u16 = _mm256_reduce_max_epu16(a); assert_eq!(15, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_reduce_max_epu16() { + const unsafe fn test_mm256_mask_reduce_max_epu16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: u16 = _mm256_mask_reduce_max_epu16(0b11111111_00000000, a); assert_eq!(7, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_reduce_max_epu16() { + const unsafe fn test_mm_reduce_max_epu16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let e: u16 = _mm_reduce_max_epu16(a); assert_eq!(7, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_reduce_max_epu16() { + const unsafe fn test_mm_mask_reduce_max_epu16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let e: u16 = _mm_mask_reduce_max_epu16(0b11110000, a); assert_eq!(3, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_reduce_max_epu8() { + const unsafe fn test_mm256_reduce_max_epu8() { let a = _mm256_set_epi8( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -15982,7 +16894,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_reduce_max_epu8() { + const unsafe fn test_mm256_mask_reduce_max_epu8() { let a = _mm256_set_epi8( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -15992,49 +16904,49 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_reduce_max_epu8() { + const unsafe fn test_mm_reduce_max_epu8() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: u8 = _mm_reduce_max_epu8(a); assert_eq!(15, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_reduce_max_epu8() { + const unsafe fn test_mm_mask_reduce_max_epu8() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: u8 = _mm_mask_reduce_max_epu8(0b11111111_00000000, a); assert_eq!(7, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_reduce_min_epi16() { + const unsafe fn test_mm256_reduce_min_epi16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: i16 = _mm256_reduce_min_epi16(a); assert_eq!(0, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_reduce_min_epi16() { + const unsafe fn test_mm256_mask_reduce_min_epi16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: i16 = _mm256_mask_reduce_min_epi16(0b11111111_00000000, a); assert_eq!(0, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_reduce_min_epi16() { + const unsafe fn test_mm_reduce_min_epi16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let e: i16 = _mm_reduce_min_epi16(a); assert_eq!(0, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_reduce_min_epi16() { + const unsafe fn test_mm_mask_reduce_min_epi16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let e: i16 = _mm_mask_reduce_min_epi16(0b11110000, a); assert_eq!(0, e); } 
#[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_reduce_min_epi8() { + const unsafe fn test_mm256_reduce_min_epi8() { let a = _mm256_set_epi8( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -16044,7 +16956,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_reduce_min_epi8() { + const unsafe fn test_mm256_mask_reduce_min_epi8() { let a = _mm256_set_epi8( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -16054,49 +16966,49 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_reduce_min_epi8() { + const unsafe fn test_mm_reduce_min_epi8() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: i8 = _mm_reduce_min_epi8(a); assert_eq!(0, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_reduce_min_epi8() { + const unsafe fn test_mm_mask_reduce_min_epi8() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: i8 = _mm_mask_reduce_min_epi8(0b11111111_00000000, a); assert_eq!(0, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_reduce_min_epu16() { + const unsafe fn test_mm256_reduce_min_epu16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: u16 = _mm256_reduce_min_epu16(a); assert_eq!(0, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_reduce_min_epu16() { + const unsafe fn test_mm256_mask_reduce_min_epu16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: u16 = _mm256_mask_reduce_min_epu16(0b11111111_00000000, a); assert_eq!(0, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_reduce_min_epu16() { + const unsafe fn test_mm_reduce_min_epu16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let e: u16 = _mm_reduce_min_epu16(a); assert_eq!(0, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_reduce_min_epu16() { + const unsafe fn test_mm_mask_reduce_min_epu16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let e: u16 = _mm_mask_reduce_min_epu16(0b11110000, a); assert_eq!(0, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_reduce_min_epu8() { + const unsafe fn test_mm256_reduce_min_epu8() { let a = _mm256_set_epi8( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -16106,7 +17018,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_reduce_min_epu8() { + const unsafe fn test_mm256_mask_reduce_min_epu8() { let a = _mm256_set_epi8( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -16116,49 +17028,49 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_reduce_min_epu8() { + const unsafe fn test_mm_reduce_min_epu8() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: u8 = _mm_reduce_min_epu8(a); assert_eq!(0, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_reduce_min_epu8() { + const unsafe fn test_mm_mask_reduce_min_epu8() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: u8 = _mm_mask_reduce_min_epu8(0b11111111_00000000, a); assert_eq!(0, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn 
test_mm256_reduce_or_epi16() { + const unsafe fn test_mm256_reduce_or_epi16() { let a = _mm256_set_epi16(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2); let e = _mm256_reduce_or_epi16(a); assert_eq!(3, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_reduce_or_epi16() { + const unsafe fn test_mm256_mask_reduce_or_epi16() { let a = _mm256_set_epi16(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2); let e = _mm256_mask_reduce_or_epi16(0b11111111_00000000, a); assert_eq!(1, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_reduce_or_epi16() { + const unsafe fn test_mm_reduce_or_epi16() { let a = _mm_set_epi16(1, 1, 1, 1, 2, 2, 2, 2); let e = _mm_reduce_or_epi16(a); assert_eq!(3, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_reduce_or_epi16() { + const unsafe fn test_mm_mask_reduce_or_epi16() { let a = _mm_set_epi16(1, 1, 1, 1, 2, 2, 2, 2); let e = _mm_mask_reduce_or_epi16(0b11110000, a); assert_eq!(1, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_reduce_or_epi8() { + const unsafe fn test_mm256_reduce_or_epi8() { let a = _mm256_set_epi8( 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, @@ -16168,7 +17080,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_reduce_or_epi8() { + const unsafe fn test_mm256_mask_reduce_or_epi8() { let a = _mm256_set_epi8( 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, @@ -16178,21 +17090,21 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_reduce_or_epi8() { + const unsafe fn test_mm_reduce_or_epi8() { let a = _mm_set_epi8(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2); let e = _mm_reduce_or_epi8(a); assert_eq!(3, e); } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_reduce_or_epi8() { + const unsafe fn test_mm_mask_reduce_or_epi8() { let a = _mm_set_epi8(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2); let e = _mm_mask_reduce_or_epi8(0b11111111_00000000, a); assert_eq!(1, e); } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_loadu_epi16() { + const unsafe fn test_mm512_loadu_epi16() { #[rustfmt::skip] let a: [i16; 32] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; let r = _mm512_loadu_epi16(&a[0]); @@ -16202,7 +17114,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_loadu_epi16() { + const unsafe fn test_mm256_loadu_epi16() { let a: [i16; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; let r = _mm256_loadu_epi16(&a[0]); let e = _mm256_set_epi16(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1); @@ -16210,7 +17122,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_loadu_epi16() { + const unsafe fn test_mm_loadu_epi16() { let a: [i16; 8] = [1, 2, 3, 4, 5, 6, 7, 8]; let r = _mm_loadu_epi16(&a[0]); let e = _mm_set_epi16(8, 7, 6, 5, 4, 3, 2, 1); @@ -16218,7 +17130,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_loadu_epi8() { + const unsafe fn test_mm512_loadu_epi8() { #[rustfmt::skip] let a: [i8; 64] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; @@ -16230,7 +17142,7 @@ mod tests { } #[simd_test(enable 
= "avx512bw,avx512vl")] - unsafe fn test_mm256_loadu_epi8() { + const unsafe fn test_mm256_loadu_epi8() { #[rustfmt::skip] let a: [i8; 32] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; let r = _mm256_loadu_epi8(&a[0]); @@ -16240,7 +17152,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_loadu_epi8() { + const unsafe fn test_mm_loadu_epi8() { let a: [i8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; let r = _mm_loadu_epi8(&a[0]); let e = _mm_set_epi8(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1); @@ -16248,7 +17160,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_storeu_epi16() { + const unsafe fn test_mm512_storeu_epi16() { let a = _mm512_set1_epi16(9); let mut r = _mm512_undefined_epi32(); _mm512_storeu_epi16(&mut r as *mut _ as *mut i16, a); @@ -16256,7 +17168,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_storeu_epi16() { + const unsafe fn test_mm256_storeu_epi16() { let a = _mm256_set1_epi16(9); let mut r = _mm256_set1_epi32(0); _mm256_storeu_epi16(&mut r as *mut _ as *mut i16, a); @@ -16264,7 +17176,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_storeu_epi16() { + const unsafe fn test_mm_storeu_epi16() { let a = _mm_set1_epi16(9); let mut r = _mm_set1_epi32(0); _mm_storeu_epi16(&mut r as *mut _ as *mut i16, a); @@ -16272,7 +17184,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_storeu_epi8() { + const unsafe fn test_mm512_storeu_epi8() { let a = _mm512_set1_epi8(9); let mut r = _mm512_undefined_epi32(); _mm512_storeu_epi8(&mut r as *mut _ as *mut i8, a); @@ -16280,7 +17192,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_storeu_epi8() { + const unsafe fn test_mm256_storeu_epi8() { let a = _mm256_set1_epi8(9); let mut r = _mm256_set1_epi32(0); _mm256_storeu_epi8(&mut r as *mut _ as *mut i8, a); @@ -16288,7 +17200,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_storeu_epi8() { + const unsafe fn test_mm_storeu_epi8() { let a = _mm_set1_epi8(9); let mut r = _mm_set1_epi32(0); _mm_storeu_epi8(&mut r as *mut _ as *mut i8, a); @@ -16296,7 +17208,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_loadu_epi16() { + const unsafe fn test_mm512_mask_loadu_epi16() { let src = _mm512_set1_epi16(42); let a = &[ 1_i16, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, @@ -16314,7 +17226,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_loadu_epi16() { + const unsafe fn test_mm512_maskz_loadu_epi16() { let a = &[ 1_i16, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -16331,7 +17243,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_storeu_epi16() { + const unsafe fn test_mm512_mask_storeu_epi16() { let mut r = [42_i16; 32]; let a = &[ 1_i16, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, @@ -16349,7 +17261,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_loadu_epi8() { + const unsafe fn test_mm512_mask_loadu_epi8() { let src = _mm512_set1_epi8(42); let a = &[ 1_i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, @@ -16369,7 +17281,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - 
unsafe fn test_mm512_maskz_loadu_epi8() { + const unsafe fn test_mm512_maskz_loadu_epi8() { let a = &[ 1_i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, @@ -16388,7 +17300,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_storeu_epi8() { + const unsafe fn test_mm512_mask_storeu_epi8() { let mut r = [42_i8; 64]; let a = &[ 1_i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, @@ -16408,7 +17320,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_loadu_epi16() { + const unsafe fn test_mm256_mask_loadu_epi16() { let src = _mm256_set1_epi16(42); let a = &[1_i16, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; let p = a.as_ptr(); @@ -16422,7 +17334,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_loadu_epi16() { + const unsafe fn test_mm256_maskz_loadu_epi16() { let a = &[1_i16, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; let p = a.as_ptr(); let m = 0b11101000_11001010; @@ -16433,7 +17345,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_storeu_epi16() { + const unsafe fn test_mm256_mask_storeu_epi16() { let mut r = [42_i16; 16]; let a = &[1_i16, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; let a = _mm256_loadu_epi16(a.as_ptr()); @@ -16447,7 +17359,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_loadu_epi8() { + const unsafe fn test_mm256_mask_loadu_epi8() { let src = _mm256_set1_epi8(42); let a = &[ 1_i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, @@ -16465,7 +17377,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_loadu_epi8() { + const unsafe fn test_mm256_maskz_loadu_epi8() { let a = &[ 1_i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -16482,7 +17394,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_storeu_epi8() { + const unsafe fn test_mm256_mask_storeu_epi8() { let mut r = [42_i8; 32]; let a = &[ 1_i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, @@ -16500,7 +17412,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_loadu_epi16() { + const unsafe fn test_mm_mask_loadu_epi16() { let src = _mm_set1_epi16(42); let a = &[1_i16, 2, 3, 4, 5, 6, 7, 8]; let p = a.as_ptr(); @@ -16512,7 +17424,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_loadu_epi16() { + const unsafe fn test_mm_maskz_loadu_epi16() { let a = &[1_i16, 2, 3, 4, 5, 6, 7, 8]; let p = a.as_ptr(); let m = 0b11001010; @@ -16523,7 +17435,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_storeu_epi16() { + const unsafe fn test_mm_mask_storeu_epi16() { let mut r = [42_i16; 8]; let a = &[1_i16, 2, 3, 4, 5, 6, 7, 8]; let a = _mm_loadu_epi16(a.as_ptr()); @@ -16535,7 +17447,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_loadu_epi8() { + const unsafe fn test_mm_mask_loadu_epi8() { let src = _mm_set1_epi8(42); let a = &[1_i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; let p = a.as_ptr(); @@ -16549,7 +17461,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn 
test_mm_maskz_loadu_epi8() { + const unsafe fn test_mm_maskz_loadu_epi8() { let a = &[1_i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; let p = a.as_ptr(); let m = 0b11101000_11001010; @@ -16560,7 +17472,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_storeu_epi8() { + const unsafe fn test_mm_mask_storeu_epi8() { let mut r = [42_i8; 16]; let a = &[1_i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; let a = _mm_loadu_epi8(a.as_ptr()); @@ -16574,7 +17486,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_madd_epi16() { + const unsafe fn test_mm512_madd_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let r = _mm512_madd_epi16(a, b); @@ -16583,7 +17495,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_madd_epi16() { + const unsafe fn test_mm512_mask_madd_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let r = _mm512_mask_madd_epi16(a, 0, a, b); @@ -16611,7 +17523,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_madd_epi16() { + const unsafe fn test_mm512_maskz_madd_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let r = _mm512_maskz_madd_epi16(0, a, b); @@ -16622,7 +17534,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_madd_epi16() { + const unsafe fn test_mm256_mask_madd_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1); let r = _mm256_mask_madd_epi16(a, 0, a, b); @@ -16642,7 +17554,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_madd_epi16() { + const unsafe fn test_mm256_maskz_madd_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1); let r = _mm256_maskz_madd_epi16(0, a, b); @@ -16653,7 +17565,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_madd_epi16() { + const unsafe fn test_mm_mask_madd_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1); let r = _mm_mask_madd_epi16(a, 0, a, b); @@ -16664,7 +17576,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_madd_epi16() { + const unsafe fn test_mm_maskz_madd_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1); let r = _mm_maskz_madd_epi16(0, a, b); @@ -17123,7 +18035,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_avg_epu16() { + const unsafe fn test_mm512_avg_epu16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let r = _mm512_avg_epu16(a, b); @@ -17132,7 +18044,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_avg_epu16() { + const unsafe fn test_mm512_mask_avg_epu16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let r = _mm512_mask_avg_epu16(a, 0, a, b); @@ -17145,7 +18057,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_avg_epu16() { + const unsafe fn test_mm512_maskz_avg_epu16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1); let r = _mm512_maskz_avg_epu16(0, a, b); @@ -17158,7 +18070,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_avg_epu16() { + const unsafe fn test_mm256_mask_avg_epu16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1); let r = _mm256_mask_avg_epu16(a, 0, a, b); @@ -17169,7 +18081,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_avg_epu16() { + const unsafe fn 
test_mm256_maskz_avg_epu16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1); let r = _mm256_maskz_avg_epu16(0, a, b); @@ -17180,7 +18092,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_avg_epu16() { + const unsafe fn test_mm_mask_avg_epu16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1); let r = _mm_mask_avg_epu16(a, 0, a, b); @@ -17191,7 +18103,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_avg_epu16() { + const unsafe fn test_mm_maskz_avg_epu16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1); let r = _mm_maskz_avg_epu16(0, a, b); @@ -17202,7 +18114,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_avg_epu8() { + const unsafe fn test_mm512_avg_epu8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(1); let r = _mm512_avg_epu8(a, b); @@ -17211,7 +18123,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_avg_epu8() { + const unsafe fn test_mm512_mask_avg_epu8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(1); let r = _mm512_mask_avg_epu8(a, 0, a, b); @@ -17231,7 +18143,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_avg_epu8() { + const unsafe fn test_mm512_maskz_avg_epu8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(1); let r = _mm512_maskz_avg_epu8(0, a, b); @@ -17250,7 +18162,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_avg_epu8() { + const unsafe fn test_mm256_mask_avg_epu8() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(1); let r = _mm256_mask_avg_epu8(a, 0, a, b); @@ -17263,7 +18175,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_avg_epu8() { + const unsafe fn test_mm256_maskz_avg_epu8() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(1); let r = _mm256_maskz_avg_epu8(0, a, b); @@ -17276,7 +18188,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_avg_epu8() { + const unsafe fn test_mm_mask_avg_epu8() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(1); let r = _mm_mask_avg_epu8(a, 0, a, b); @@ -17287,7 +18199,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_avg_epu8() { + const unsafe fn test_mm_maskz_avg_epu8() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(1); let r = _mm_maskz_avg_epu8(0, a, b); @@ -17373,7 +18285,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_slli_epi16() { + const unsafe fn test_mm512_slli_epi16() { let a = _mm512_set1_epi16(1 << 15); let r = _mm512_slli_epi16::<1>(a); let e = _mm512_set1_epi16(0); @@ -17381,7 +18293,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_slli_epi16() { + const unsafe fn test_mm512_mask_slli_epi16() { let a = _mm512_set1_epi16(1 << 15); let r = _mm512_mask_slli_epi16::<1>(a, 0, a); assert_eq_m512i(r, a); @@ -17391,7 +18303,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_slli_epi16() { + const unsafe fn test_mm512_maskz_slli_epi16() { let a = _mm512_set1_epi16(1 << 15); let r = _mm512_maskz_slli_epi16::<1>(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -17401,7 +18313,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_slli_epi16() { + const unsafe fn test_mm256_mask_slli_epi16() { let a = _mm256_set1_epi16(1 << 15); let r = _mm256_mask_slli_epi16::<1>(a, 0, a); assert_eq_m256i(r, a); @@ 
-17411,7 +18323,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_slli_epi16() { + const unsafe fn test_mm256_maskz_slli_epi16() { let a = _mm256_set1_epi16(1 << 15); let r = _mm256_maskz_slli_epi16::<1>(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -17421,7 +18333,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_slli_epi16() { + const unsafe fn test_mm_mask_slli_epi16() { let a = _mm_set1_epi16(1 << 15); let r = _mm_mask_slli_epi16::<1>(a, 0, a); assert_eq_m128i(r, a); @@ -17431,7 +18343,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_slli_epi16() { + const unsafe fn test_mm_maskz_slli_epi16() { let a = _mm_set1_epi16(1 << 15); let r = _mm_maskz_slli_epi16::<1>(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -17441,7 +18353,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_sllv_epi16() { + const unsafe fn test_mm512_sllv_epi16() { let a = _mm512_set1_epi16(1 << 15); let count = _mm512_set1_epi16(2); let r = _mm512_sllv_epi16(a, count); @@ -17450,7 +18362,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_sllv_epi16() { + const unsafe fn test_mm512_mask_sllv_epi16() { let a = _mm512_set1_epi16(1 << 15); let count = _mm512_set1_epi16(2); let r = _mm512_mask_sllv_epi16(a, 0, a, count); @@ -17461,7 +18373,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_sllv_epi16() { + const unsafe fn test_mm512_maskz_sllv_epi16() { let a = _mm512_set1_epi16(1 << 15); let count = _mm512_set1_epi16(2); let r = _mm512_maskz_sllv_epi16(0, a, count); @@ -17472,7 +18384,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_sllv_epi16() { + const unsafe fn test_mm256_sllv_epi16() { let a = _mm256_set1_epi16(1 << 15); let count = _mm256_set1_epi16(2); let r = _mm256_sllv_epi16(a, count); @@ -17481,7 +18393,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_sllv_epi16() { + const unsafe fn test_mm256_mask_sllv_epi16() { let a = _mm256_set1_epi16(1 << 15); let count = _mm256_set1_epi16(2); let r = _mm256_mask_sllv_epi16(a, 0, a, count); @@ -17492,7 +18404,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_sllv_epi16() { + const unsafe fn test_mm256_maskz_sllv_epi16() { let a = _mm256_set1_epi16(1 << 15); let count = _mm256_set1_epi16(2); let r = _mm256_maskz_sllv_epi16(0, a, count); @@ -17503,7 +18415,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_sllv_epi16() { + const unsafe fn test_mm_sllv_epi16() { let a = _mm_set1_epi16(1 << 15); let count = _mm_set1_epi16(2); let r = _mm_sllv_epi16(a, count); @@ -17512,7 +18424,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_sllv_epi16() { + const unsafe fn test_mm_mask_sllv_epi16() { let a = _mm_set1_epi16(1 << 15); let count = _mm_set1_epi16(2); let r = _mm_mask_sllv_epi16(a, 0, a, count); @@ -17523,7 +18435,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_sllv_epi16() { + const unsafe fn test_mm_maskz_sllv_epi16() { let a = _mm_set1_epi16(1 << 15); let count = _mm_set1_epi16(2); let r = _mm_maskz_sllv_epi16(0, a, count); @@ -17609,7 +18521,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_srli_epi16() { + const unsafe fn test_mm512_srli_epi16() { let a = _mm512_set1_epi16(1 << 1); let r = _mm512_srli_epi16::<2>(a); 
let e = _mm512_set1_epi16(0); @@ -17617,7 +18529,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_srli_epi16() { + const unsafe fn test_mm512_mask_srli_epi16() { let a = _mm512_set1_epi16(1 << 1); let r = _mm512_mask_srli_epi16::<2>(a, 0, a); assert_eq_m512i(r, a); @@ -17627,7 +18539,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_srli_epi16() { + const unsafe fn test_mm512_maskz_srli_epi16() { let a = _mm512_set1_epi16(1 << 1); let r = _mm512_maskz_srli_epi16::<2>(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -17637,7 +18549,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_srli_epi16() { + const unsafe fn test_mm256_mask_srli_epi16() { let a = _mm256_set1_epi16(1 << 1); let r = _mm256_mask_srli_epi16::<2>(a, 0, a); assert_eq_m256i(r, a); @@ -17647,7 +18559,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_srli_epi16() { + const unsafe fn test_mm256_maskz_srli_epi16() { let a = _mm256_set1_epi16(1 << 1); let r = _mm256_maskz_srli_epi16::<2>(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -17657,7 +18569,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_srli_epi16() { + const unsafe fn test_mm_mask_srli_epi16() { let a = _mm_set1_epi16(1 << 1); let r = _mm_mask_srli_epi16::<2>(a, 0, a); assert_eq_m128i(r, a); @@ -17667,7 +18579,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_srli_epi16() { + const unsafe fn test_mm_maskz_srli_epi16() { let a = _mm_set1_epi16(1 << 1); let r = _mm_maskz_srli_epi16::<2>(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -17677,7 +18589,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_srlv_epi16() { + const unsafe fn test_mm512_srlv_epi16() { let a = _mm512_set1_epi16(1 << 1); let count = _mm512_set1_epi16(2); let r = _mm512_srlv_epi16(a, count); @@ -17686,7 +18598,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_srlv_epi16() { + const unsafe fn test_mm512_mask_srlv_epi16() { let a = _mm512_set1_epi16(1 << 1); let count = _mm512_set1_epi16(2); let r = _mm512_mask_srlv_epi16(a, 0, a, count); @@ -17697,7 +18609,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_srlv_epi16() { + const unsafe fn test_mm512_maskz_srlv_epi16() { let a = _mm512_set1_epi16(1 << 1); let count = _mm512_set1_epi16(2); let r = _mm512_maskz_srlv_epi16(0, a, count); @@ -17708,7 +18620,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_srlv_epi16() { + const unsafe fn test_mm256_srlv_epi16() { let a = _mm256_set1_epi16(1 << 1); let count = _mm256_set1_epi16(2); let r = _mm256_srlv_epi16(a, count); @@ -17717,7 +18629,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_srlv_epi16() { + const unsafe fn test_mm256_mask_srlv_epi16() { let a = _mm256_set1_epi16(1 << 1); let count = _mm256_set1_epi16(2); let r = _mm256_mask_srlv_epi16(a, 0, a, count); @@ -17728,7 +18640,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_srlv_epi16() { + const unsafe fn test_mm256_maskz_srlv_epi16() { let a = _mm256_set1_epi16(1 << 1); let count = _mm256_set1_epi16(2); let r = _mm256_maskz_srlv_epi16(0, a, count); @@ -17739,7 +18651,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_srlv_epi16() { + const unsafe fn test_mm_srlv_epi16() { let a = 
_mm_set1_epi16(1 << 1); let count = _mm_set1_epi16(2); let r = _mm_srlv_epi16(a, count); @@ -17748,7 +18660,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_srlv_epi16() { + const unsafe fn test_mm_mask_srlv_epi16() { let a = _mm_set1_epi16(1 << 1); let count = _mm_set1_epi16(2); let r = _mm_mask_srlv_epi16(a, 0, a, count); @@ -17759,7 +18671,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_srlv_epi16() { + const unsafe fn test_mm_maskz_srlv_epi16() { let a = _mm_set1_epi16(1 << 1); let count = _mm_set1_epi16(2); let r = _mm_maskz_srlv_epi16(0, a, count); @@ -17845,7 +18757,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_srai_epi16() { + const unsafe fn test_mm512_srai_epi16() { let a = _mm512_set1_epi16(8); let r = _mm512_srai_epi16::<2>(a); let e = _mm512_set1_epi16(2); @@ -17853,7 +18765,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_srai_epi16() { + const unsafe fn test_mm512_mask_srai_epi16() { let a = _mm512_set1_epi16(8); let r = _mm512_mask_srai_epi16::<2>(a, 0, a); assert_eq_m512i(r, a); @@ -17863,7 +18775,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_srai_epi16() { + const unsafe fn test_mm512_maskz_srai_epi16() { let a = _mm512_set1_epi16(8); let r = _mm512_maskz_srai_epi16::<2>(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -17873,7 +18785,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_srai_epi16() { + const unsafe fn test_mm256_mask_srai_epi16() { let a = _mm256_set1_epi16(8); let r = _mm256_mask_srai_epi16::<2>(a, 0, a); assert_eq_m256i(r, a); @@ -17883,7 +18795,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_srai_epi16() { + const unsafe fn test_mm256_maskz_srai_epi16() { let a = _mm256_set1_epi16(8); let r = _mm256_maskz_srai_epi16::<2>(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -17893,7 +18805,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_srai_epi16() { + const unsafe fn test_mm_mask_srai_epi16() { let a = _mm_set1_epi16(8); let r = _mm_mask_srai_epi16::<2>(a, 0, a); assert_eq_m128i(r, a); @@ -17903,7 +18815,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_srai_epi16() { + const unsafe fn test_mm_maskz_srai_epi16() { let a = _mm_set1_epi16(8); let r = _mm_maskz_srai_epi16::<2>(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -17913,7 +18825,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_srav_epi16() { + const unsafe fn test_mm512_srav_epi16() { let a = _mm512_set1_epi16(8); let count = _mm512_set1_epi16(2); let r = _mm512_srav_epi16(a, count); @@ -17922,7 +18834,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_srav_epi16() { + const unsafe fn test_mm512_mask_srav_epi16() { let a = _mm512_set1_epi16(8); let count = _mm512_set1_epi16(2); let r = _mm512_mask_srav_epi16(a, 0, a, count); @@ -17933,7 +18845,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_srav_epi16() { + const unsafe fn test_mm512_maskz_srav_epi16() { let a = _mm512_set1_epi16(8); let count = _mm512_set1_epi16(2); let r = _mm512_maskz_srav_epi16(0, a, count); @@ -17944,7 +18856,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_srav_epi16() { + const unsafe fn test_mm256_srav_epi16() { let a = _mm256_set1_epi16(8); let count = 
_mm256_set1_epi16(2); let r = _mm256_srav_epi16(a, count); @@ -17953,7 +18865,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_srav_epi16() { + const unsafe fn test_mm256_mask_srav_epi16() { let a = _mm256_set1_epi16(8); let count = _mm256_set1_epi16(2); let r = _mm256_mask_srav_epi16(a, 0, a, count); @@ -17964,7 +18876,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_srav_epi16() { + const unsafe fn test_mm256_maskz_srav_epi16() { let a = _mm256_set1_epi16(8); let count = _mm256_set1_epi16(2); let r = _mm256_maskz_srav_epi16(0, a, count); @@ -17975,7 +18887,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_srav_epi16() { + const unsafe fn test_mm_srav_epi16() { let a = _mm_set1_epi16(8); let count = _mm_set1_epi16(2); let r = _mm_srav_epi16(a, count); @@ -17984,7 +18896,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_srav_epi16() { + const unsafe fn test_mm_mask_srav_epi16() { let a = _mm_set1_epi16(8); let count = _mm_set1_epi16(2); let r = _mm_mask_srav_epi16(a, 0, a, count); @@ -17995,7 +18907,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_srav_epi16() { + const unsafe fn test_mm_maskz_srav_epi16() { let a = _mm_set1_epi16(8); let count = _mm_set1_epi16(2); let r = _mm_maskz_srav_epi16(0, a, count); @@ -18288,7 +19200,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_blend_epi16() { + const unsafe fn test_mm512_mask_blend_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(2); let r = _mm512_mask_blend_epi16(0b11111111_00000000_11111111_00000000, a, b); @@ -18299,7 +19211,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_blend_epi16() { + const unsafe fn test_mm256_mask_blend_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(2); let r = _mm256_mask_blend_epi16(0b11111111_00000000, a, b); @@ -18308,7 +19220,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_blend_epi16() { + const unsafe fn test_mm_mask_blend_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(2); let r = _mm_mask_blend_epi16(0b11110000, a, b); @@ -18317,7 +19229,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_blend_epi8() { + const unsafe fn test_mm512_mask_blend_epi8() { let a = _mm512_set1_epi8(1); let b = _mm512_set1_epi8(2); let r = _mm512_mask_blend_epi8( @@ -18334,7 +19246,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_blend_epi8() { + const unsafe fn test_mm256_mask_blend_epi8() { let a = _mm256_set1_epi8(1); let b = _mm256_set1_epi8(2); let r = _mm256_mask_blend_epi8(0b11111111_00000000_11111111_00000000, a, b); @@ -18345,7 +19257,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_blend_epi8() { + const unsafe fn test_mm_mask_blend_epi8() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(2); let r = _mm_mask_blend_epi8(0b11111111_00000000, a, b); @@ -18354,7 +19266,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_broadcastw_epi16() { + const unsafe fn test_mm512_broadcastw_epi16() { let a = _mm_set_epi16(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm512_broadcastw_epi16(a); let e = _mm512_set1_epi16(24); @@ -18362,7 +19274,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_broadcastw_epi16() { + const unsafe fn 
test_mm512_mask_broadcastw_epi16() { let src = _mm512_set1_epi16(1); let a = _mm_set_epi16(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm512_mask_broadcastw_epi16(src, 0, a); @@ -18373,7 +19285,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_broadcastw_epi16() { + const unsafe fn test_mm512_maskz_broadcastw_epi16() { let a = _mm_set_epi16(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm512_maskz_broadcastw_epi16(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -18383,7 +19295,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_broadcastw_epi16() { + const unsafe fn test_mm256_mask_broadcastw_epi16() { let src = _mm256_set1_epi16(1); let a = _mm_set_epi16(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm256_mask_broadcastw_epi16(src, 0, a); @@ -18394,7 +19306,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_broadcastw_epi16() { + const unsafe fn test_mm256_maskz_broadcastw_epi16() { let a = _mm_set_epi16(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm256_maskz_broadcastw_epi16(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -18404,7 +19316,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_broadcastw_epi16() { + const unsafe fn test_mm_mask_broadcastw_epi16() { let src = _mm_set1_epi16(1); let a = _mm_set_epi16(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm_mask_broadcastw_epi16(src, 0, a); @@ -18415,7 +19327,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_broadcastw_epi16() { + const unsafe fn test_mm_maskz_broadcastw_epi16() { let a = _mm_set_epi16(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm_maskz_broadcastw_epi16(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -18425,7 +19337,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_broadcastb_epi8() { + const unsafe fn test_mm512_broadcastb_epi8() { let a = _mm_set_epi8( 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ); @@ -18435,7 +19347,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_broadcastb_epi8() { + const unsafe fn test_mm512_mask_broadcastb_epi8() { let src = _mm512_set1_epi8(1); let a = _mm_set_epi8( 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -18452,7 +19364,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_broadcastb_epi8() { + const unsafe fn test_mm512_maskz_broadcastb_epi8() { let a = _mm_set_epi8( 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ); @@ -18467,7 +19379,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_broadcastb_epi8() { + const unsafe fn test_mm256_mask_broadcastb_epi8() { let src = _mm256_set1_epi8(1); let a = _mm_set_epi8( 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -18480,7 +19392,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_broadcastb_epi8() { + const unsafe fn test_mm256_maskz_broadcastb_epi8() { let a = _mm_set_epi8( 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ); @@ -18492,7 +19404,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_broadcastb_epi8() { + const unsafe fn test_mm_mask_broadcastb_epi8() { let src = _mm_set1_epi8(1); let a = _mm_set_epi8( 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -18505,7 +19417,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn 
test_mm_maskz_broadcastb_epi8() { + const unsafe fn test_mm_maskz_broadcastb_epi8() { let a = _mm_set_epi8( 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ); @@ -18517,7 +19429,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_unpackhi_epi16() { + const unsafe fn test_mm512_unpackhi_epi16() { #[rustfmt::skip] let a = _mm512_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32); @@ -18532,7 +19444,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_unpackhi_epi16() { + const unsafe fn test_mm512_mask_unpackhi_epi16() { #[rustfmt::skip] let a = _mm512_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32); @@ -18549,7 +19461,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_unpackhi_epi16() { + const unsafe fn test_mm512_maskz_unpackhi_epi16() { #[rustfmt::skip] let a = _mm512_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32); @@ -18566,7 +19478,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_unpackhi_epi16() { + const unsafe fn test_mm256_mask_unpackhi_epi16() { let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm256_set_epi16( 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, @@ -18579,7 +19491,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_unpackhi_epi16() { + const unsafe fn test_mm256_maskz_unpackhi_epi16() { let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm256_set_epi16( 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, @@ -18592,7 +19504,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_unpackhi_epi16() { + const unsafe fn test_mm_mask_unpackhi_epi16() { let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm_set_epi16(33, 34, 35, 36, 37, 38, 39, 40); let r = _mm_mask_unpackhi_epi16(a, 0, a, b); @@ -18603,7 +19515,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_unpackhi_epi16() { + const unsafe fn test_mm_maskz_unpackhi_epi16() { let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm_set_epi16(33, 34, 35, 36, 37, 38, 39, 40); let r = _mm_maskz_unpackhi_epi16(0, a, b); @@ -18614,7 +19526,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_unpackhi_epi8() { + const unsafe fn test_mm512_unpackhi_epi8() { #[rustfmt::skip] let a = _mm512_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -18635,7 +19547,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_unpackhi_epi8() { + const unsafe fn test_mm512_mask_unpackhi_epi8() { #[rustfmt::skip] let a = _mm512_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -18663,7 +19575,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_unpackhi_epi8() { + const unsafe fn test_mm512_maskz_unpackhi_epi8() { #[rustfmt::skip] let a = _mm512_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -18690,7 +19602,7 @@ mod tests { } 
#[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_unpackhi_epi8() { + const unsafe fn test_mm256_mask_unpackhi_epi8() { #[rustfmt::skip] let a = _mm256_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32); @@ -18707,7 +19619,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_unpackhi_epi8() { + const unsafe fn test_mm256_maskz_unpackhi_epi8() { #[rustfmt::skip] let a = _mm256_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32); @@ -18724,7 +19636,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_unpackhi_epi8() { + const unsafe fn test_mm_mask_unpackhi_epi8() { let a = _mm_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm_set_epi8( 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, @@ -18737,7 +19649,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_unpackhi_epi8() { + const unsafe fn test_mm_maskz_unpackhi_epi8() { let a = _mm_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm_set_epi8( 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, @@ -18750,7 +19662,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_unpacklo_epi16() { + const unsafe fn test_mm512_unpacklo_epi16() { #[rustfmt::skip] let a = _mm512_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32); @@ -18765,7 +19677,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_unpacklo_epi16() { + const unsafe fn test_mm512_mask_unpacklo_epi16() { #[rustfmt::skip] let a = _mm512_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32); @@ -18782,7 +19694,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_unpacklo_epi16() { + const unsafe fn test_mm512_maskz_unpacklo_epi16() { #[rustfmt::skip] let a = _mm512_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32); @@ -18799,7 +19711,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_unpacklo_epi16() { + const unsafe fn test_mm256_mask_unpacklo_epi16() { let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm256_set_epi16( 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, @@ -18812,7 +19724,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_unpacklo_epi16() { + const unsafe fn test_mm256_maskz_unpacklo_epi16() { let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm256_set_epi16( 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, @@ -18825,7 +19737,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_unpacklo_epi16() { + const unsafe fn test_mm_mask_unpacklo_epi16() { let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm_set_epi16(33, 34, 35, 36, 37, 38, 39, 40); let r = _mm_mask_unpacklo_epi16(a, 0, a, b); @@ -18836,7 +19748,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_unpacklo_epi16() { + const unsafe fn test_mm_maskz_unpacklo_epi16() { let a = _mm_set_epi16(1, 2, 3, 4, 5, 
6, 7, 8); let b = _mm_set_epi16(33, 34, 35, 36, 37, 38, 39, 40); let r = _mm_maskz_unpacklo_epi16(0, a, b); @@ -18847,7 +19759,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_unpacklo_epi8() { + const unsafe fn test_mm512_unpacklo_epi8() { #[rustfmt::skip] let a = _mm512_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -18868,7 +19780,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_unpacklo_epi8() { + const unsafe fn test_mm512_mask_unpacklo_epi8() { #[rustfmt::skip] let a = _mm512_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -18896,7 +19808,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_unpacklo_epi8() { + const unsafe fn test_mm512_maskz_unpacklo_epi8() { #[rustfmt::skip] let a = _mm512_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -18923,7 +19835,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_unpacklo_epi8() { + const unsafe fn test_mm256_mask_unpacklo_epi8() { #[rustfmt::skip] let a = _mm256_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32); @@ -18940,7 +19852,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_unpacklo_epi8() { + const unsafe fn test_mm256_maskz_unpacklo_epi8() { #[rustfmt::skip] let a = _mm256_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32); @@ -18957,7 +19869,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_unpacklo_epi8() { + const unsafe fn test_mm_mask_unpacklo_epi8() { let a = _mm_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm_set_epi8( 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, @@ -18972,7 +19884,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_unpacklo_epi8() { + const unsafe fn test_mm_maskz_unpacklo_epi8() { let a = _mm_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm_set_epi8( 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, @@ -18987,7 +19899,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_mov_epi16() { + const unsafe fn test_mm512_mask_mov_epi16() { let src = _mm512_set1_epi16(1); let a = _mm512_set1_epi16(2); let r = _mm512_mask_mov_epi16(src, 0, a); @@ -18997,7 +19909,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_mov_epi16() { + const unsafe fn test_mm512_maskz_mov_epi16() { let a = _mm512_set1_epi16(2); let r = _mm512_maskz_mov_epi16(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -19006,7 +19918,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_mov_epi16() { + const unsafe fn test_mm256_mask_mov_epi16() { let src = _mm256_set1_epi16(1); let a = _mm256_set1_epi16(2); let r = _mm256_mask_mov_epi16(src, 0, a); @@ -19016,7 +19928,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_mov_epi16() { + const unsafe fn test_mm256_maskz_mov_epi16() { let a = _mm256_set1_epi16(2); let r = _mm256_maskz_mov_epi16(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ 
-19025,7 +19937,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_mov_epi16() { + const unsafe fn test_mm_mask_mov_epi16() { let src = _mm_set1_epi16(1); let a = _mm_set1_epi16(2); let r = _mm_mask_mov_epi16(src, 0, a); @@ -19035,7 +19947,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_mov_epi16() { + const unsafe fn test_mm_maskz_mov_epi16() { let a = _mm_set1_epi16(2); let r = _mm_maskz_mov_epi16(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -19044,7 +19956,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_mov_epi8() { + const unsafe fn test_mm512_mask_mov_epi8() { let src = _mm512_set1_epi8(1); let a = _mm512_set1_epi8(2); let r = _mm512_mask_mov_epi8(src, 0, a); @@ -19058,7 +19970,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_mov_epi8() { + const unsafe fn test_mm512_maskz_mov_epi8() { let a = _mm512_set1_epi8(2); let r = _mm512_maskz_mov_epi8(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -19070,7 +19982,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_mov_epi8() { + const unsafe fn test_mm256_mask_mov_epi8() { let src = _mm256_set1_epi8(1); let a = _mm256_set1_epi8(2); let r = _mm256_mask_mov_epi8(src, 0, a); @@ -19080,7 +19992,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_mov_epi8() { + const unsafe fn test_mm256_maskz_mov_epi8() { let a = _mm256_set1_epi8(2); let r = _mm256_maskz_mov_epi8(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -19089,7 +20001,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_mov_epi8() { + const unsafe fn test_mm_mask_mov_epi8() { let src = _mm_set1_epi8(1); let a = _mm_set1_epi8(2); let r = _mm_mask_mov_epi8(src, 0, a); @@ -19099,7 +20011,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_mov_epi8() { + const unsafe fn test_mm_maskz_mov_epi8() { let a = _mm_set1_epi8(2); let r = _mm_maskz_mov_epi8(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -19108,7 +20020,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_set1_epi16() { + const unsafe fn test_mm512_mask_set1_epi16() { let src = _mm512_set1_epi16(2); let a: i16 = 11; let r = _mm512_mask_set1_epi16(src, 0, a); @@ -19119,7 +20031,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_set1_epi16() { + const unsafe fn test_mm512_maskz_set1_epi16() { let a: i16 = 11; let r = _mm512_maskz_set1_epi16(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -19129,7 +20041,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_set1_epi16() { + const unsafe fn test_mm256_mask_set1_epi16() { let src = _mm256_set1_epi16(2); let a: i16 = 11; let r = _mm256_mask_set1_epi16(src, 0, a); @@ -19140,7 +20052,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_set1_epi16() { + const unsafe fn test_mm256_maskz_set1_epi16() { let a: i16 = 11; let r = _mm256_maskz_set1_epi16(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -19150,7 +20062,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_set1_epi16() { + const unsafe fn test_mm_mask_set1_epi16() { let src = _mm_set1_epi16(2); let a: i16 = 11; let r = _mm_mask_set1_epi16(src, 0, a); @@ -19161,7 +20073,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn 
test_mm_maskz_set1_epi16() { + const unsafe fn test_mm_maskz_set1_epi16() { let a: i16 = 11; let r = _mm_maskz_set1_epi16(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -19171,7 +20083,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_set1_epi8() { + const unsafe fn test_mm512_mask_set1_epi8() { let src = _mm512_set1_epi8(2); let a: i8 = 11; let r = _mm512_mask_set1_epi8(src, 0, a); @@ -19186,7 +20098,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_set1_epi8() { + const unsafe fn test_mm512_maskz_set1_epi8() { let a: i8 = 11; let r = _mm512_maskz_set1_epi8(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -19199,7 +20111,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_set1_epi8() { + const unsafe fn test_mm256_mask_set1_epi8() { let src = _mm256_set1_epi8(2); let a: i8 = 11; let r = _mm256_mask_set1_epi8(src, 0, a); @@ -19210,7 +20122,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_set1_epi8() { + const unsafe fn test_mm256_maskz_set1_epi8() { let a: i8 = 11; let r = _mm256_maskz_set1_epi8(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -19220,7 +20132,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_set1_epi8() { + const unsafe fn test_mm_mask_set1_epi8() { let src = _mm_set1_epi8(2); let a: i8 = 11; let r = _mm_mask_set1_epi8(src, 0, a); @@ -19231,7 +20143,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_set1_epi8() { + const unsafe fn test_mm_maskz_set1_epi8() { let a: i8 = 11; let r = _mm_maskz_set1_epi8(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -19241,7 +20153,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_shufflelo_epi16() { + const unsafe fn test_mm512_shufflelo_epi16() { #[rustfmt::skip] let a = _mm512_set_epi16( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -19257,7 +20169,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_shufflelo_epi16() { + const unsafe fn test_mm512_mask_shufflelo_epi16() { #[rustfmt::skip] let a = _mm512_set_epi16( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -19279,7 +20191,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_shufflelo_epi16() { + const unsafe fn test_mm512_maskz_shufflelo_epi16() { #[rustfmt::skip] let a = _mm512_set_epi16( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -19298,7 +20210,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_shufflelo_epi16() { + const unsafe fn test_mm256_mask_shufflelo_epi16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm256_mask_shufflelo_epi16::<0b00_01_01_11>(a, 0, a); assert_eq_m256i(r, a); @@ -19308,7 +20220,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_shufflelo_epi16() { + const unsafe fn test_mm256_maskz_shufflelo_epi16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm256_maskz_shufflelo_epi16::<0b00_01_01_11>(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -19318,7 +20230,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_shufflelo_epi16() { + const unsafe fn test_mm_mask_shufflelo_epi16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let r = _mm_mask_shufflelo_epi16::<0b00_01_01_11>(a, 0, a); assert_eq_m128i(r, a); @@ 
-19328,7 +20240,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_shufflelo_epi16() { + const unsafe fn test_mm_maskz_shufflelo_epi16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let r = _mm_maskz_shufflelo_epi16::<0b00_01_01_11>(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -19338,7 +20250,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_shufflehi_epi16() { + const unsafe fn test_mm512_shufflehi_epi16() { #[rustfmt::skip] let a = _mm512_set_epi16( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -19354,7 +20266,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_shufflehi_epi16() { + const unsafe fn test_mm512_mask_shufflehi_epi16() { #[rustfmt::skip] let a = _mm512_set_epi16( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -19376,7 +20288,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_shufflehi_epi16() { + const unsafe fn test_mm512_maskz_shufflehi_epi16() { #[rustfmt::skip] let a = _mm512_set_epi16( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -19395,7 +20307,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_shufflehi_epi16() { + const unsafe fn test_mm256_mask_shufflehi_epi16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm256_mask_shufflehi_epi16::<0b00_01_01_11>(a, 0, a); assert_eq_m256i(r, a); @@ -19405,7 +20317,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_shufflehi_epi16() { + const unsafe fn test_mm256_maskz_shufflehi_epi16() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm256_maskz_shufflehi_epi16::<0b00_01_01_11>(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -19415,7 +20327,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_shufflehi_epi16() { + const unsafe fn test_mm_mask_shufflehi_epi16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let r = _mm_mask_shufflehi_epi16::<0b00_01_01_11>(a, 0, a); assert_eq_m128i(r, a); @@ -19425,7 +20337,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_shufflehi_epi16() { + const unsafe fn test_mm_maskz_shufflehi_epi16() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let r = _mm_maskz_shufflehi_epi16::<0b00_01_01_11>(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -19556,7 +20468,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_test_epi16_mask() { + const unsafe fn test_mm512_test_epi16_mask() { let a = _mm512_set1_epi16(1 << 0); let b = _mm512_set1_epi16(1 << 0 | 1 << 1); let r = _mm512_test_epi16_mask(a, b); @@ -19565,7 +20477,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_test_epi16_mask() { + const unsafe fn test_mm512_mask_test_epi16_mask() { let a = _mm512_set1_epi16(1 << 0); let b = _mm512_set1_epi16(1 << 0 | 1 << 1); let r = _mm512_mask_test_epi16_mask(0, a, b); @@ -19576,7 +20488,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_test_epi16_mask() { + const unsafe fn test_mm256_test_epi16_mask() { let a = _mm256_set1_epi16(1 << 0); let b = _mm256_set1_epi16(1 << 0 | 1 << 1); let r = _mm256_test_epi16_mask(a, b); @@ -19585,7 +20497,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_test_epi16_mask() { + const unsafe fn test_mm256_mask_test_epi16_mask() { let a = 
_mm256_set1_epi16(1 << 0); let b = _mm256_set1_epi16(1 << 0 | 1 << 1); let r = _mm256_mask_test_epi16_mask(0, a, b); @@ -19596,7 +20508,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_test_epi16_mask() { + const unsafe fn test_mm_test_epi16_mask() { let a = _mm_set1_epi16(1 << 0); let b = _mm_set1_epi16(1 << 0 | 1 << 1); let r = _mm_test_epi16_mask(a, b); @@ -19605,7 +20517,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_test_epi16_mask() { + const unsafe fn test_mm_mask_test_epi16_mask() { let a = _mm_set1_epi16(1 << 0); let b = _mm_set1_epi16(1 << 0 | 1 << 1); let r = _mm_mask_test_epi16_mask(0, a, b); @@ -19616,7 +20528,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_test_epi8_mask() { + const unsafe fn test_mm512_test_epi8_mask() { let a = _mm512_set1_epi8(1 << 0); let b = _mm512_set1_epi8(1 << 0 | 1 << 1); let r = _mm512_test_epi8_mask(a, b); @@ -19626,7 +20538,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_test_epi8_mask() { + const unsafe fn test_mm512_mask_test_epi8_mask() { let a = _mm512_set1_epi8(1 << 0); let b = _mm512_set1_epi8(1 << 0 | 1 << 1); let r = _mm512_mask_test_epi8_mask(0, a, b); @@ -19642,7 +20554,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_test_epi8_mask() { + const unsafe fn test_mm256_test_epi8_mask() { let a = _mm256_set1_epi8(1 << 0); let b = _mm256_set1_epi8(1 << 0 | 1 << 1); let r = _mm256_test_epi8_mask(a, b); @@ -19651,7 +20563,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_test_epi8_mask() { + const unsafe fn test_mm256_mask_test_epi8_mask() { let a = _mm256_set1_epi8(1 << 0); let b = _mm256_set1_epi8(1 << 0 | 1 << 1); let r = _mm256_mask_test_epi8_mask(0, a, b); @@ -19662,7 +20574,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_test_epi8_mask() { + const unsafe fn test_mm_test_epi8_mask() { let a = _mm_set1_epi8(1 << 0); let b = _mm_set1_epi8(1 << 0 | 1 << 1); let r = _mm_test_epi8_mask(a, b); @@ -19671,7 +20583,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_test_epi8_mask() { + const unsafe fn test_mm_mask_test_epi8_mask() { let a = _mm_set1_epi8(1 << 0); let b = _mm_set1_epi8(1 << 0 | 1 << 1); let r = _mm_mask_test_epi8_mask(0, a, b); @@ -19682,7 +20594,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_testn_epi16_mask() { + const unsafe fn test_mm512_testn_epi16_mask() { let a = _mm512_set1_epi16(1 << 0); let b = _mm512_set1_epi16(1 << 0 | 1 << 1); let r = _mm512_testn_epi16_mask(a, b); @@ -19691,7 +20603,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_testn_epi16_mask() { + const unsafe fn test_mm512_mask_testn_epi16_mask() { let a = _mm512_set1_epi16(1 << 0); let b = _mm512_set1_epi16(1 << 0 | 1 << 1); let r = _mm512_mask_testn_epi16_mask(0, a, b); @@ -19702,7 +20614,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_testn_epi16_mask() { + const unsafe fn test_mm256_testn_epi16_mask() { let a = _mm256_set1_epi16(1 << 0); let b = _mm256_set1_epi16(1 << 0 | 1 << 1); let r = _mm256_testn_epi16_mask(a, b); @@ -19711,7 +20623,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_testn_epi16_mask() { + const unsafe fn test_mm256_mask_testn_epi16_mask() { let a = _mm256_set1_epi16(1 << 0); let b = _mm256_set1_epi16(1 << 0 | 1 << 1); let r = 
_mm256_mask_testn_epi16_mask(0, a, b); @@ -19722,7 +20634,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_testn_epi16_mask() { + const unsafe fn test_mm_testn_epi16_mask() { let a = _mm_set1_epi16(1 << 0); let b = _mm_set1_epi16(1 << 0 | 1 << 1); let r = _mm_testn_epi16_mask(a, b); @@ -19731,7 +20643,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_testn_epi16_mask() { + const unsafe fn test_mm_mask_testn_epi16_mask() { let a = _mm_set1_epi16(1 << 0); let b = _mm_set1_epi16(1 << 0 | 1 << 1); let r = _mm_mask_testn_epi16_mask(0, a, b); @@ -19742,7 +20654,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_testn_epi8_mask() { + const unsafe fn test_mm512_testn_epi8_mask() { let a = _mm512_set1_epi8(1 << 0); let b = _mm512_set1_epi8(1 << 0 | 1 << 1); let r = _mm512_testn_epi8_mask(a, b); @@ -19752,7 +20664,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_testn_epi8_mask() { + const unsafe fn test_mm512_mask_testn_epi8_mask() { let a = _mm512_set1_epi8(1 << 0); let b = _mm512_set1_epi8(1 << 0 | 1 << 1); let r = _mm512_mask_testn_epi8_mask(0, a, b); @@ -19768,7 +20680,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_testn_epi8_mask() { + const unsafe fn test_mm256_testn_epi8_mask() { let a = _mm256_set1_epi8(1 << 0); let b = _mm256_set1_epi8(1 << 0 | 1 << 1); let r = _mm256_testn_epi8_mask(a, b); @@ -19777,7 +20689,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_testn_epi8_mask() { + const unsafe fn test_mm256_mask_testn_epi8_mask() { let a = _mm256_set1_epi8(1 << 0); let b = _mm256_set1_epi8(1 << 0 | 1 << 1); let r = _mm256_mask_testn_epi8_mask(0, a, b); @@ -19788,7 +20700,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_testn_epi8_mask() { + const unsafe fn test_mm_testn_epi8_mask() { let a = _mm_set1_epi8(1 << 0); let b = _mm_set1_epi8(1 << 0 | 1 << 1); let r = _mm_testn_epi8_mask(a, b); @@ -19797,7 +20709,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_testn_epi8_mask() { + const unsafe fn test_mm_mask_testn_epi8_mask() { let a = _mm_set1_epi8(1 << 0); let b = _mm_set1_epi8(1 << 0 | 1 << 1); let r = _mm_mask_testn_epi8_mask(0, a, b); @@ -19808,7 +20720,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_store_mask64() { + const unsafe fn test_store_mask64() { let a: __mmask64 = 0b11111111_00000000_11111111_00000000_11111111_00000000_11111111_00000000; let mut r = 0; @@ -19817,7 +20729,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_store_mask32() { + const unsafe fn test_store_mask32() { let a: __mmask32 = 0b11111111_00000000_11111111_00000000; let mut r = 0; _store_mask32(&mut r, a); @@ -19825,7 +20737,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_load_mask64() { + const unsafe fn test_load_mask64() { let p: __mmask64 = 0b11111111_00000000_11111111_00000000_11111111_00000000_11111111_00000000; let r = _load_mask64(&p); @@ -19835,7 +20747,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_load_mask32() { + const unsafe fn test_load_mask32() { let p: __mmask32 = 0b11111111_00000000_11111111_00000000; let r = _load_mask32(&p); let e: __mmask32 = 0b11111111_00000000_11111111_00000000; @@ -19948,7 +20860,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_movepi16_mask() { + const unsafe fn test_mm512_movepi16_mask() 
{ let a = _mm512_set1_epi16(1 << 15); let r = _mm512_movepi16_mask(a); let e: __mmask32 = 0b11111111_11111111_11111111_11111111; @@ -19956,7 +20868,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_movepi16_mask() { + const unsafe fn test_mm256_movepi16_mask() { let a = _mm256_set1_epi16(1 << 15); let r = _mm256_movepi16_mask(a); let e: __mmask16 = 0b11111111_11111111; @@ -19964,7 +20876,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_movepi16_mask() { + const unsafe fn test_mm_movepi16_mask() { let a = _mm_set1_epi16(1 << 15); let r = _mm_movepi16_mask(a); let e: __mmask8 = 0b11111111; @@ -19972,7 +20884,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_movepi8_mask() { + const unsafe fn test_mm512_movepi8_mask() { let a = _mm512_set1_epi8(1 << 7); let r = _mm512_movepi8_mask(a); let e: __mmask64 = @@ -19981,7 +20893,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_movepi8_mask() { + const unsafe fn test_mm256_movepi8_mask() { let a = _mm256_set1_epi8(1 << 7); let r = _mm256_movepi8_mask(a); let e: __mmask32 = 0b11111111_11111111_11111111_11111111; @@ -19989,7 +20901,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_movepi8_mask() { + const unsafe fn test_mm_movepi8_mask() { let a = _mm_set1_epi8(1 << 7); let r = _mm_movepi8_mask(a); let e: __mmask16 = 0b11111111_11111111; @@ -19997,7 +20909,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_movm_epi16() { + const unsafe fn test_mm512_movm_epi16() { let a: __mmask32 = 0b11111111_11111111_11111111_11111111; let r = _mm512_movm_epi16(a); let e = _mm512_set1_epi16( @@ -20022,7 +20934,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_movm_epi16() { + const unsafe fn test_mm256_movm_epi16() { let a: __mmask16 = 0b11111111_11111111; let r = _mm256_movm_epi16(a); let e = _mm256_set1_epi16( @@ -20047,7 +20959,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_movm_epi16() { + const unsafe fn test_mm_movm_epi16() { let a: __mmask8 = 0b11111111; let r = _mm_movm_epi16(a); let e = _mm_set1_epi16( @@ -20072,7 +20984,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_movm_epi8() { + const unsafe fn test_mm512_movm_epi8() { let a: __mmask64 = 0b11111111_11111111_11111111_11111111_11111111_11111111_11111111_11111111; let r = _mm512_movm_epi8(a); @@ -20082,7 +20994,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_movm_epi8() { + const unsafe fn test_mm256_movm_epi8() { let a: __mmask32 = 0b11111111_11111111_11111111_11111111; let r = _mm256_movm_epi8(a); let e = @@ -20091,7 +21003,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_movm_epi8() { + const unsafe fn test_mm_movm_epi8() { let a: __mmask16 = 0b11111111_11111111; let r = _mm_movm_epi8(a); let e = @@ -20100,7 +21012,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_cvtmask32_u32() { + const unsafe fn test_cvtmask32_u32() { let a: __mmask32 = 0b11001100_00110011_01100110_10011001; let r = _cvtmask32_u32(a); let e: u32 = 0b11001100_00110011_01100110_10011001; @@ -20108,7 +21020,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_cvtu32_mask32() { + const unsafe fn test_cvtu32_mask32() { let a: u32 = 0b11001100_00110011_01100110_10011001; let r = _cvtu32_mask32(a); let e: __mmask32 = 0b11001100_00110011_01100110_10011001; 
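Editorial note, not part of the diff: the `_k*_mask32` / `_k*_mask64` intrinsics exercised in the hunks below operate on plain integer mask types (`__mmask32` is a `u32`), so making them `const fn` amounts to exposing ordinary integer bit operations in const contexts. A minimal stand-in model of `_kxor_mask32`, using the same operands as the test below, can already be checked at compile time on stable Rust without the feature gates this PR adds:

const fn kxor_mask32_model(a: u32, b: u32) -> u32 {
    // Stand-in for `_kxor_mask32`: bitwise XOR of two 32-bit masks.
    a ^ b
}

const _: () = {
    let a: u32 = 0b00110011_11001100_00110011_11001100;
    let b: u32 = 0b11001100_00110011_11001100_00110011;
    // The operands are bitwise complements, so the XOR sets every bit.
    assert!(kxor_mask32_model(a, b) == u32::MAX);
};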
@@ -20116,7 +21028,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kadd_mask32() { + const unsafe fn test_kadd_mask32() { let a: __mmask32 = 11; let b: __mmask32 = 22; let r = _kadd_mask32(a, b); @@ -20125,7 +21037,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kadd_mask64() { + const unsafe fn test_kadd_mask64() { let a: __mmask64 = 11; let b: __mmask64 = 22; let r = _kadd_mask64(a, b); @@ -20134,7 +21046,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kand_mask32() { + const unsafe fn test_kand_mask32() { let a: __mmask32 = 0b11001100_00110011_11001100_00110011; let b: __mmask32 = 0b11001100_00110011_11001100_00110011; let r = _kand_mask32(a, b); @@ -20143,7 +21055,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kand_mask64() { + const unsafe fn test_kand_mask64() { let a: __mmask64 = 0b11001100_00110011_11001100_00110011_11001100_00110011_11001100_00110011; let b: __mmask64 = @@ -20155,7 +21067,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_knot_mask32() { + const unsafe fn test_knot_mask32() { let a: __mmask32 = 0b11001100_00110011_11001100_00110011; let r = _knot_mask32(a); let e: __mmask32 = 0b00110011_11001100_00110011_11001100; @@ -20163,7 +21075,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_knot_mask64() { + const unsafe fn test_knot_mask64() { let a: __mmask64 = 0b11001100_00110011_11001100_00110011_11001100_00110011_11001100_00110011; let r = _knot_mask64(a); @@ -20173,7 +21085,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kandn_mask32() { + const unsafe fn test_kandn_mask32() { let a: __mmask32 = 0b11001100_00110011_11001100_00110011; let b: __mmask32 = 0b11001100_00110011_11001100_00110011; let r = _kandn_mask32(a, b); @@ -20182,7 +21094,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kandn_mask64() { + const unsafe fn test_kandn_mask64() { let a: __mmask64 = 0b11001100_00110011_11001100_00110011_11001100_00110011_11001100_00110011; let b: __mmask64 = @@ -20194,7 +21106,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kor_mask32() { + const unsafe fn test_kor_mask32() { let a: __mmask32 = 0b00110011_11001100_00110011_11001100; let b: __mmask32 = 0b11001100_00110011_11001100_00110011; let r = _kor_mask32(a, b); @@ -20203,7 +21115,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kor_mask64() { + const unsafe fn test_kor_mask64() { let a: __mmask64 = 0b00110011_11001100_00110011_11001100_00110011_11001100_00110011_11001100; let b: __mmask64 = @@ -20215,7 +21127,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kxor_mask32() { + const unsafe fn test_kxor_mask32() { let a: __mmask32 = 0b00110011_11001100_00110011_11001100; let b: __mmask32 = 0b11001100_00110011_11001100_00110011; let r = _kxor_mask32(a, b); @@ -20224,7 +21136,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kxor_mask64() { + const unsafe fn test_kxor_mask64() { let a: __mmask64 = 0b00110011_11001100_00110011_11001100_00110011_11001100_00110011_11001100; let b: __mmask64 = @@ -20236,7 +21148,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kxnor_mask32() { + const unsafe fn test_kxnor_mask32() { let a: __mmask32 = 0b00110011_11001100_00110011_11001100; let b: __mmask32 = 0b11001100_00110011_11001100_00110011; let r = _kxnor_mask32(a, b); @@ -20245,7 +21157,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn 
test_kxnor_mask64() { + const unsafe fn test_kxnor_mask64() { let a: __mmask64 = 0b00110011_11001100_00110011_11001100_00110011_11001100_00110011_11001100; let b: __mmask64 = @@ -20257,7 +21169,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kortest_mask32_u8() { + const unsafe fn test_kortest_mask32_u8() { let a: __mmask32 = 0b0110100101101001_0110100101101001; let b: __mmask32 = 0b1011011010110110_1011011010110110; let mut all_ones: u8 = 0; @@ -20267,7 +21179,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kortest_mask64_u8() { + const unsafe fn test_kortest_mask64_u8() { let a: __mmask64 = 0b0110100101101001_0110100101101001; let b: __mmask64 = 0b1011011010110110_1011011010110110; let mut all_ones: u8 = 0; @@ -20277,7 +21189,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kortestc_mask32_u8() { + const unsafe fn test_kortestc_mask32_u8() { let a: __mmask32 = 0b0110100101101001_0110100101101001; let b: __mmask32 = 0b1011011010110110_1011011010110110; let r = _kortestc_mask32_u8(a, b); @@ -20285,7 +21197,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kortestc_mask64_u8() { + const unsafe fn test_kortestc_mask64_u8() { let a: __mmask64 = 0b0110100101101001_0110100101101001; let b: __mmask64 = 0b1011011010110110_1011011010110110; let r = _kortestc_mask64_u8(a, b); @@ -20293,7 +21205,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kortestz_mask32_u8() { + const unsafe fn test_kortestz_mask32_u8() { let a: __mmask32 = 0b0110100101101001_0110100101101001; let b: __mmask32 = 0b1011011010110110_1011011010110110; let r = _kortestz_mask32_u8(a, b); @@ -20301,7 +21213,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kortestz_mask64_u8() { + const unsafe fn test_kortestz_mask64_u8() { let a: __mmask64 = 0b0110100101101001_0110100101101001; let b: __mmask64 = 0b1011011010110110_1011011010110110; let r = _kortestz_mask64_u8(a, b); @@ -20309,7 +21221,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kshiftli_mask32() { + const unsafe fn test_kshiftli_mask32() { let a: __mmask32 = 0b0110100101101001_0110100101101001; let r = _kshiftli_mask32::<3>(a); let e: __mmask32 = 0b0100101101001011_0100101101001000; @@ -20329,7 +21241,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kshiftli_mask64() { + const unsafe fn test_kshiftli_mask64() { let a: __mmask64 = 0b0110100101101001_0110100101101001; let r = _kshiftli_mask64::<3>(a); let e: __mmask64 = 0b0110100101101001011_0100101101001000; @@ -20349,7 +21261,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kshiftri_mask32() { + const unsafe fn test_kshiftri_mask32() { let a: __mmask32 = 0b1010100101101001_0110100101101001; let r = _kshiftri_mask32::<3>(a); let e: __mmask32 = 0b0001010100101101_0010110100101101; @@ -20369,7 +21281,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_kshiftri_mask64() { + const unsafe fn test_kshiftri_mask64() { let a: __mmask64 = 0b1010100101101001011_0100101101001000; let r = _kshiftri_mask64::<3>(a); let e: __mmask64 = 0b1010100101101001_0110100101101001; @@ -20393,7 +21305,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_ktest_mask32_u8() { + const unsafe fn test_ktest_mask32_u8() { let a: __mmask32 = 0b0110100100111100_0110100100111100; let b: __mmask32 = 0b1001011011000011_1001011011000011; let mut and_not: u8 = 0; @@ -20403,7 +21315,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn 
test_ktestc_mask32_u8() { + const unsafe fn test_ktestc_mask32_u8() { let a: __mmask32 = 0b0110100100111100_0110100100111100; let b: __mmask32 = 0b1001011011000011_1001011011000011; let r = _ktestc_mask32_u8(a, b); @@ -20411,7 +21323,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_ktestz_mask32_u8() { + const unsafe fn test_ktestz_mask32_u8() { let a: __mmask32 = 0b0110100100111100_0110100100111100; let b: __mmask32 = 0b1001011011000011_1001011011000011; let r = _ktestz_mask32_u8(a, b); @@ -20419,7 +21331,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_ktest_mask64_u8() { + const unsafe fn test_ktest_mask64_u8() { let a: __mmask64 = 0b0110100100111100_0110100100111100; let b: __mmask64 = 0b1001011011000011_1001011011000011; let mut and_not: u8 = 0; @@ -20429,7 +21341,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_ktestc_mask64_u8() { + const unsafe fn test_ktestc_mask64_u8() { let a: __mmask64 = 0b0110100100111100_0110100100111100; let b: __mmask64 = 0b1001011011000011_1001011011000011; let r = _ktestc_mask64_u8(a, b); @@ -20437,7 +21349,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_ktestz_mask64_u8() { + const unsafe fn test_ktestz_mask64_u8() { let a: __mmask64 = 0b0110100100111100_0110100100111100; let b: __mmask64 = 0b1001011011000011_1001011011000011; let r = _ktestz_mask64_u8(a, b); @@ -20445,7 +21357,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_kunpackw() { + const unsafe fn test_mm512_kunpackw() { let a: u32 = 0x00110011; let b: u32 = 0x00001011; let r = _mm512_kunpackw(a, b); @@ -20454,7 +21366,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_kunpackd() { + const unsafe fn test_mm512_kunpackd() { let a: u64 = 0x11001100_00110011; let b: u64 = 0x00101110_00001011; let r = _mm512_kunpackd(a, b); @@ -20463,7 +21375,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cvtepi16_epi8() { + const unsafe fn test_mm512_cvtepi16_epi8() { let a = _mm512_set1_epi16(2); let r = _mm512_cvtepi16_epi8(a); let e = _mm256_set1_epi8(2); @@ -20471,7 +21383,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cvtepi16_epi8() { + const unsafe fn test_mm512_mask_cvtepi16_epi8() { let src = _mm256_set1_epi8(1); let a = _mm512_set1_epi16(2); let r = _mm512_mask_cvtepi16_epi8(src, 0, a); @@ -20482,7 +21394,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_cvtepi16_epi8() { + const unsafe fn test_mm512_maskz_cvtepi16_epi8() { let a = _mm512_set1_epi16(2); let r = _mm512_maskz_cvtepi16_epi8(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -20492,7 +21404,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_cvtepi16_epi8() { + const unsafe fn test_mm256_cvtepi16_epi8() { let a = _mm256_set1_epi16(2); let r = _mm256_cvtepi16_epi8(a); let e = _mm_set1_epi8(2); @@ -20500,7 +21412,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cvtepi16_epi8() { + const unsafe fn test_mm256_mask_cvtepi16_epi8() { let src = _mm_set1_epi8(1); let a = _mm256_set1_epi16(2); let r = _mm256_mask_cvtepi16_epi8(src, 0, a); @@ -20511,7 +21423,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_cvtepi16_epi8() { + const unsafe fn test_mm256_maskz_cvtepi16_epi8() { let a = _mm256_set1_epi16(2); let r = _mm256_maskz_cvtepi16_epi8(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -20521,7 +21433,7 
@@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_cvtepi16_epi8() { + const unsafe fn test_mm_cvtepi16_epi8() { let a = _mm_set1_epi16(2); let r = _mm_cvtepi16_epi8(a); let e = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2); @@ -20529,7 +21441,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cvtepi16_epi8() { + const unsafe fn test_mm_mask_cvtepi16_epi8() { let src = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1); let a = _mm_set1_epi16(2); let r = _mm_mask_cvtepi16_epi8(src, 0, a); @@ -20540,7 +21452,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_cvtepi16_epi8() { + const unsafe fn test_mm_maskz_cvtepi16_epi8() { let a = _mm_set1_epi16(2); let r = _mm_maskz_cvtepi16_epi8(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -20727,7 +21639,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cvtepi8_epi16() { + const unsafe fn test_mm512_cvtepi8_epi16() { let a = _mm256_set1_epi8(2); let r = _mm512_cvtepi8_epi16(a); let e = _mm512_set1_epi16(2); @@ -20735,7 +21647,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cvtepi8_epi16() { + const unsafe fn test_mm512_mask_cvtepi8_epi16() { let src = _mm512_set1_epi16(1); let a = _mm256_set1_epi8(2); let r = _mm512_mask_cvtepi8_epi16(src, 0, a); @@ -20746,7 +21658,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_cvtepi8_epi16() { + const unsafe fn test_mm512_maskz_cvtepi8_epi16() { let a = _mm256_set1_epi8(2); let r = _mm512_maskz_cvtepi8_epi16(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -20756,7 +21668,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cvtepi8_epi16() { + const unsafe fn test_mm256_mask_cvtepi8_epi16() { let src = _mm256_set1_epi16(1); let a = _mm_set1_epi8(2); let r = _mm256_mask_cvtepi8_epi16(src, 0, a); @@ -20767,7 +21679,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_cvtepi8_epi16() { + const unsafe fn test_mm256_maskz_cvtepi8_epi16() { let a = _mm_set1_epi8(2); let r = _mm256_maskz_cvtepi8_epi16(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -20777,7 +21689,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cvtepi8_epi16() { + const unsafe fn test_mm_mask_cvtepi8_epi16() { let src = _mm_set1_epi16(1); let a = _mm_set1_epi8(2); let r = _mm_mask_cvtepi8_epi16(src, 0, a); @@ -20788,7 +21700,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_cvtepi8_epi16() { + const unsafe fn test_mm_maskz_cvtepi8_epi16() { let a = _mm_set1_epi8(2); let r = _mm_maskz_cvtepi8_epi16(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -20798,7 +21710,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_cvtepu8_epi16() { + const unsafe fn test_mm512_cvtepu8_epi16() { let a = _mm256_set1_epi8(2); let r = _mm512_cvtepu8_epi16(a); let e = _mm512_set1_epi16(2); @@ -20806,7 +21718,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_cvtepu8_epi16() { + const unsafe fn test_mm512_mask_cvtepu8_epi16() { let src = _mm512_set1_epi16(1); let a = _mm256_set1_epi8(2); let r = _mm512_mask_cvtepu8_epi16(src, 0, a); @@ -20817,7 +21729,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_cvtepu8_epi16() { + const unsafe fn test_mm512_maskz_cvtepu8_epi16() { let a = _mm256_set1_epi8(2); let r = 
_mm512_maskz_cvtepu8_epi16(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -20827,7 +21739,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_cvtepu8_epi16() { + const unsafe fn test_mm256_mask_cvtepu8_epi16() { let src = _mm256_set1_epi16(1); let a = _mm_set1_epi8(2); let r = _mm256_mask_cvtepu8_epi16(src, 0, a); @@ -20838,7 +21750,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_cvtepu8_epi16() { + const unsafe fn test_mm256_maskz_cvtepu8_epi16() { let a = _mm_set1_epi8(2); let r = _mm256_maskz_cvtepu8_epi16(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -20848,7 +21760,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_cvtepu8_epi16() { + const unsafe fn test_mm_mask_cvtepu8_epi16() { let src = _mm_set1_epi16(1); let a = _mm_set1_epi8(2); let r = _mm_mask_cvtepu8_epi16(src, 0, a); @@ -20859,7 +21771,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_cvtepu8_epi16() { + const unsafe fn test_mm_maskz_cvtepu8_epi16() { let a = _mm_set1_epi8(2); let r = _mm_maskz_cvtepu8_epi16(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -20869,7 +21781,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_bslli_epi128() { + const unsafe fn test_mm512_bslli_epi128() { #[rustfmt::skip] let a = _mm512_set_epi8( 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, @@ -20889,7 +21801,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_bsrli_epi128() { + const unsafe fn test_mm512_bsrli_epi128() { #[rustfmt::skip] let a = _mm512_set_epi8( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, @@ -20909,7 +21821,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_alignr_epi8() { + const unsafe fn test_mm512_alignr_epi8() { #[rustfmt::skip] let a = _mm512_set_epi8( 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, @@ -20930,7 +21842,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_mask_alignr_epi8() { + const unsafe fn test_mm512_mask_alignr_epi8() { #[rustfmt::skip] let a = _mm512_set_epi8( 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, @@ -20958,7 +21870,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_mm512_maskz_alignr_epi8() { + const unsafe fn test_mm512_maskz_alignr_epi8() { #[rustfmt::skip] let a = _mm512_set_epi8( 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, @@ -20985,7 +21897,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_mask_alignr_epi8() { + const unsafe fn test_mm256_mask_alignr_epi8() { #[rustfmt::skip] let a = _mm256_set_epi8( 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, @@ -21004,7 +21916,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm256_maskz_alignr_epi8() { + const unsafe fn test_mm256_maskz_alignr_epi8() { #[rustfmt::skip] let a = _mm256_set_epi8( 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, @@ -21023,7 +21935,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_mask_alignr_epi8() { + const unsafe fn test_mm_mask_alignr_epi8() { let a = _mm_set_epi8(1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0); let b = _mm_set1_epi8(1); let r = _mm_mask_alignr_epi8::<14>(a, 0, a, b); @@ -21034,7 +21946,7 @@ mod tests { } #[simd_test(enable = "avx512bw,avx512vl")] - unsafe fn test_mm_maskz_alignr_epi8() { + const unsafe fn test_mm_maskz_alignr_epi8() { let a = _mm_set_epi8(1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0); 
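// Editorial note, not part of the diff: the `_maskz_*` tests in this file all
// rely on the same zero-masking rule -- lane i of the result is the operation's
// lane i when bit i of the mask is set, and zero otherwise (the
// `simd_select_bitmask(k, op, ZERO)` pattern used by the intrinsic bodies).
// A hypothetical scalar model of that rule is already const-evaluable on stable
// Rust and mirrors the `assert_eq_m128i(r, _mm_setzero_si128())` checks made
// with an all-zero mask:
const fn maskz_select_model(k: u8, op: [i16; 8]) -> [i16; 8] {
    let mut out = [0i16; 8];
    let mut i = 0;
    while i < 8 {
        if (k >> i) & 1 == 1 {
            out[i] = op[i]; // masked-in lane keeps the operation's result
        }
        i += 1;
    }
    out
}

const _: () = {
    let r = maskz_select_model(0, [1, 2, 3, 4, 5, 6, 7, 8]);
    assert!(r[0] == 0 && r[7] == 0); // an all-zero mask zeroes every lane
};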
let b = _mm_set1_epi8(1); let r = _mm_maskz_alignr_epi8::<14>(0, a, b); diff --git a/crates/core_arch/src/x86/avx512cd.rs b/crates/core_arch/src/x86/avx512cd.rs index 78735fcc90..b163698b56 100644 --- a/crates/core_arch/src/x86/avx512cd.rs +++ b/crates/core_arch/src/x86/avx512cd.rs @@ -11,7 +11,8 @@ use stdarch_test::assert_instr; #[target_feature(enable = "avx512cd")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmw2d -pub fn _mm512_broadcastmw_epi32(k: __mmask16) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_broadcastmw_epi32(k: __mmask16) -> __m512i { _mm512_set1_epi32(k as i32) } @@ -22,7 +23,8 @@ pub fn _mm512_broadcastmw_epi32(k: __mmask16) -> __m512i { #[target_feature(enable = "avx512cd,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmw2d -pub fn _mm256_broadcastmw_epi32(k: __mmask16) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_broadcastmw_epi32(k: __mmask16) -> __m256i { _mm256_set1_epi32(k as i32) } @@ -33,7 +35,8 @@ pub fn _mm256_broadcastmw_epi32(k: __mmask16) -> __m256i { #[target_feature(enable = "avx512cd,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmw2d -pub fn _mm_broadcastmw_epi32(k: __mmask16) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_broadcastmw_epi32(k: __mmask16) -> __m128i { _mm_set1_epi32(k as i32) } @@ -44,7 +47,8 @@ pub fn _mm_broadcastmw_epi32(k: __mmask16) -> __m128i { #[target_feature(enable = "avx512cd")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmb2q -pub fn _mm512_broadcastmb_epi64(k: __mmask8) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_broadcastmb_epi64(k: __mmask8) -> __m512i { _mm512_set1_epi64(k as i64) } @@ -55,7 +59,8 @@ pub fn _mm512_broadcastmb_epi64(k: __mmask8) -> __m512i { #[target_feature(enable = "avx512cd,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmb2q -pub fn _mm256_broadcastmb_epi64(k: __mmask8) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_broadcastmb_epi64(k: __mmask8) -> __m256i { _mm256_set1_epi64x(k as i64) } @@ -66,7 +71,8 @@ pub fn _mm256_broadcastmb_epi64(k: __mmask8) -> __m256i { #[target_feature(enable = "avx512cd,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmb2q -pub fn _mm_broadcastmb_epi64(k: __mmask8) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_broadcastmb_epi64(k: __mmask8) -> __m128i { _mm_set1_epi64x(k as i64) } @@ -311,7 +317,8 @@ pub fn _mm_maskz_conflict_epi64(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512cd")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vplzcntd))] -pub fn _mm512_lzcnt_epi32(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_lzcnt_epi32(a: __m512i) -> __m512i { unsafe { 
transmute(simd_ctlz(a.as_i32x16())) } } @@ -322,7 +329,8 @@ pub fn _mm512_lzcnt_epi32(a: __m512i) -> __m512i { #[target_feature(enable = "avx512cd")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vplzcntd))] -pub fn _mm512_mask_lzcnt_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_lzcnt_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { unsafe { let zerocount = _mm512_lzcnt_epi32(a).as_i32x16(); transmute(simd_select_bitmask(k, zerocount, src.as_i32x16())) @@ -336,7 +344,8 @@ pub fn _mm512_mask_lzcnt_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512 #[target_feature(enable = "avx512cd")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vplzcntd))] -pub fn _mm512_maskz_lzcnt_epi32(k: __mmask16, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_lzcnt_epi32(k: __mmask16, a: __m512i) -> __m512i { unsafe { let zerocount = _mm512_lzcnt_epi32(a).as_i32x16(); transmute(simd_select_bitmask(k, zerocount, i32x16::ZERO)) @@ -350,7 +359,8 @@ pub fn _mm512_maskz_lzcnt_epi32(k: __mmask16, a: __m512i) -> __m512i { #[target_feature(enable = "avx512cd,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vplzcntd))] -pub fn _mm256_lzcnt_epi32(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_lzcnt_epi32(a: __m256i) -> __m256i { unsafe { transmute(simd_ctlz(a.as_i32x8())) } } @@ -361,7 +371,8 @@ pub fn _mm256_lzcnt_epi32(a: __m256i) -> __m256i { #[target_feature(enable = "avx512cd,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vplzcntd))] -pub fn _mm256_mask_lzcnt_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_lzcnt_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { unsafe { let zerocount = _mm256_lzcnt_epi32(a).as_i32x8(); transmute(simd_select_bitmask(k, zerocount, src.as_i32x8())) @@ -375,7 +386,8 @@ pub fn _mm256_mask_lzcnt_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i #[target_feature(enable = "avx512cd,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vplzcntd))] -pub fn _mm256_maskz_lzcnt_epi32(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_lzcnt_epi32(k: __mmask8, a: __m256i) -> __m256i { unsafe { let zerocount = _mm256_lzcnt_epi32(a).as_i32x8(); transmute(simd_select_bitmask(k, zerocount, i32x8::ZERO)) @@ -389,7 +401,8 @@ pub fn _mm256_maskz_lzcnt_epi32(k: __mmask8, a: __m256i) -> __m256i { #[target_feature(enable = "avx512cd,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vplzcntd))] -pub fn _mm_lzcnt_epi32(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_lzcnt_epi32(a: __m128i) -> __m128i { unsafe { transmute(simd_ctlz(a.as_i32x4())) } } @@ -400,7 +413,8 @@ pub fn _mm_lzcnt_epi32(a: __m128i) -> __m128i { #[target_feature(enable = "avx512cd,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vplzcntd))] -pub fn 
_mm_mask_lzcnt_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_lzcnt_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let zerocount = _mm_lzcnt_epi32(a).as_i32x4(); transmute(simd_select_bitmask(k, zerocount, src.as_i32x4())) @@ -414,7 +428,8 @@ pub fn _mm_mask_lzcnt_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512cd,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vplzcntd))] -pub fn _mm_maskz_lzcnt_epi32(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_lzcnt_epi32(k: __mmask8, a: __m128i) -> __m128i { unsafe { let zerocount = _mm_lzcnt_epi32(a).as_i32x4(); transmute(simd_select_bitmask(k, zerocount, i32x4::ZERO)) @@ -428,7 +443,8 @@ pub fn _mm_maskz_lzcnt_epi32(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512cd")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vplzcntq))] -pub fn _mm512_lzcnt_epi64(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_lzcnt_epi64(a: __m512i) -> __m512i { unsafe { transmute(simd_ctlz(a.as_i64x8())) } } @@ -439,7 +455,8 @@ pub fn _mm512_lzcnt_epi64(a: __m512i) -> __m512i { #[target_feature(enable = "avx512cd")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vplzcntq))] -pub fn _mm512_mask_lzcnt_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_lzcnt_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { unsafe { let zerocount = _mm512_lzcnt_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, zerocount, src.as_i64x8())) @@ -453,7 +470,8 @@ pub fn _mm512_mask_lzcnt_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i #[target_feature(enable = "avx512cd")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vplzcntq))] -pub fn _mm512_maskz_lzcnt_epi64(k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_lzcnt_epi64(k: __mmask8, a: __m512i) -> __m512i { unsafe { let zerocount = _mm512_lzcnt_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, zerocount, i64x8::ZERO)) @@ -467,7 +485,8 @@ pub fn _mm512_maskz_lzcnt_epi64(k: __mmask8, a: __m512i) -> __m512i { #[target_feature(enable = "avx512cd,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vplzcntq))] -pub fn _mm256_lzcnt_epi64(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_lzcnt_epi64(a: __m256i) -> __m256i { unsafe { transmute(simd_ctlz(a.as_i64x4())) } } @@ -478,7 +497,8 @@ pub fn _mm256_lzcnt_epi64(a: __m256i) -> __m256i { #[target_feature(enable = "avx512cd,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vplzcntq))] -pub fn _mm256_mask_lzcnt_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_lzcnt_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { unsafe { let zerocount = _mm256_lzcnt_epi64(a).as_i64x4(); 
transmute(simd_select_bitmask(k, zerocount, src.as_i64x4())) @@ -492,7 +512,8 @@ pub fn _mm256_mask_lzcnt_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i #[target_feature(enable = "avx512cd,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vplzcntq))] -pub fn _mm256_maskz_lzcnt_epi64(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_lzcnt_epi64(k: __mmask8, a: __m256i) -> __m256i { unsafe { let zerocount = _mm256_lzcnt_epi64(a).as_i64x4(); transmute(simd_select_bitmask(k, zerocount, i64x4::ZERO)) @@ -506,7 +527,8 @@ pub fn _mm256_maskz_lzcnt_epi64(k: __mmask8, a: __m256i) -> __m256i { #[target_feature(enable = "avx512cd,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vplzcntq))] -pub fn _mm_lzcnt_epi64(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_lzcnt_epi64(a: __m128i) -> __m128i { unsafe { transmute(simd_ctlz(a.as_i64x2())) } } @@ -517,7 +539,8 @@ pub fn _mm_lzcnt_epi64(a: __m128i) -> __m128i { #[target_feature(enable = "avx512cd,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vplzcntq))] -pub fn _mm_mask_lzcnt_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_lzcnt_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let zerocount = _mm_lzcnt_epi64(a).as_i64x2(); transmute(simd_select_bitmask(k, zerocount, src.as_i64x2())) @@ -531,7 +554,8 @@ pub fn _mm_mask_lzcnt_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512cd,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vplzcntq))] -pub fn _mm_maskz_lzcnt_epi64(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_lzcnt_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { let zerocount = _mm_lzcnt_epi64(a).as_i64x2(); transmute(simd_select_bitmask(k, zerocount, i64x2::ZERO)) @@ -557,12 +581,13 @@ unsafe extern "C" { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use crate::core_arch::x86::*; use stdarch_test::simd_test; #[simd_test(enable = "avx512cd")] - unsafe fn test_mm512_broadcastmw_epi32() { + const unsafe fn test_mm512_broadcastmw_epi32() { let a: __mmask16 = 2; let r = _mm512_broadcastmw_epi32(a); let e = _mm512_set1_epi32(2); @@ -570,7 +595,7 @@ mod tests { } #[simd_test(enable = "avx512cd,avx512vl")] - unsafe fn test_mm256_broadcastmw_epi32() { + const unsafe fn test_mm256_broadcastmw_epi32() { let a: __mmask16 = 2; let r = _mm256_broadcastmw_epi32(a); let e = _mm256_set1_epi32(2); @@ -578,7 +603,7 @@ mod tests { } #[simd_test(enable = "avx512cd,avx512vl")] - unsafe fn test_mm_broadcastmw_epi32() { + const unsafe fn test_mm_broadcastmw_epi32() { let a: __mmask16 = 2; let r = _mm_broadcastmw_epi32(a); let e = _mm_set1_epi32(2); @@ -586,7 +611,7 @@ mod tests { } #[simd_test(enable = "avx512cd")] - unsafe fn test_mm512_broadcastmb_epi64() { + const unsafe fn test_mm512_broadcastmb_epi64() { let a: __mmask8 = 2; let r = _mm512_broadcastmb_epi64(a); let e = _mm512_set1_epi64(2); @@ -594,7 +619,7 @@ mod tests { } #[simd_test(enable = "avx512cd,avx512vl")] - unsafe fn test_mm256_broadcastmb_epi64() { + 
const unsafe fn test_mm256_broadcastmb_epi64() { let a: __mmask8 = 2; let r = _mm256_broadcastmb_epi64(a); let e = _mm256_set1_epi64x(2); @@ -602,7 +627,7 @@ mod tests { } #[simd_test(enable = "avx512cd,avx512vl")] - unsafe fn test_mm_broadcastmb_epi64() { + const unsafe fn test_mm_broadcastmb_epi64() { let a: __mmask8 = 2; let r = _mm_broadcastmb_epi64(a); let e = _mm_set1_epi64x(2); @@ -1063,7 +1088,7 @@ mod tests { } #[simd_test(enable = "avx512cd")] - unsafe fn test_mm512_lzcnt_epi32() { + const unsafe fn test_mm512_lzcnt_epi32() { let a = _mm512_set1_epi32(1); let r = _mm512_lzcnt_epi32(a); let e = _mm512_set1_epi32(31); @@ -1071,7 +1096,7 @@ mod tests { } #[simd_test(enable = "avx512cd")] - unsafe fn test_mm512_mask_lzcnt_epi32() { + const unsafe fn test_mm512_mask_lzcnt_epi32() { let a = _mm512_set1_epi32(1); let r = _mm512_mask_lzcnt_epi32(a, 0, a); assert_eq_m512i(r, a); @@ -1081,7 +1106,7 @@ mod tests { } #[simd_test(enable = "avx512cd")] - unsafe fn test_mm512_maskz_lzcnt_epi32() { + const unsafe fn test_mm512_maskz_lzcnt_epi32() { let a = _mm512_set1_epi32(2); let r = _mm512_maskz_lzcnt_epi32(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -1091,7 +1116,7 @@ mod tests { } #[simd_test(enable = "avx512cd,avx512vl")] - unsafe fn test_mm256_lzcnt_epi32() { + const unsafe fn test_mm256_lzcnt_epi32() { let a = _mm256_set1_epi32(1); let r = _mm256_lzcnt_epi32(a); let e = _mm256_set1_epi32(31); @@ -1099,7 +1124,7 @@ mod tests { } #[simd_test(enable = "avx512cd,avx512vl")] - unsafe fn test_mm256_mask_lzcnt_epi32() { + const unsafe fn test_mm256_mask_lzcnt_epi32() { let a = _mm256_set1_epi32(1); let r = _mm256_mask_lzcnt_epi32(a, 0, a); assert_eq_m256i(r, a); @@ -1109,7 +1134,7 @@ mod tests { } #[simd_test(enable = "avx512cd,avx512vl")] - unsafe fn test_mm256_maskz_lzcnt_epi32() { + const unsafe fn test_mm256_maskz_lzcnt_epi32() { let a = _mm256_set1_epi32(1); let r = _mm256_maskz_lzcnt_epi32(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -1119,7 +1144,7 @@ mod tests { } #[simd_test(enable = "avx512cd,avx512vl")] - unsafe fn test_mm_lzcnt_epi32() { + const unsafe fn test_mm_lzcnt_epi32() { let a = _mm_set1_epi32(1); let r = _mm_lzcnt_epi32(a); let e = _mm_set1_epi32(31); @@ -1127,7 +1152,7 @@ mod tests { } #[simd_test(enable = "avx512cd,avx512vl")] - unsafe fn test_mm_mask_lzcnt_epi32() { + const unsafe fn test_mm_mask_lzcnt_epi32() { let a = _mm_set1_epi32(1); let r = _mm_mask_lzcnt_epi32(a, 0, a); assert_eq_m128i(r, a); @@ -1137,7 +1162,7 @@ mod tests { } #[simd_test(enable = "avx512cd,avx512vl")] - unsafe fn test_mm_maskz_lzcnt_epi32() { + const unsafe fn test_mm_maskz_lzcnt_epi32() { let a = _mm_set1_epi32(1); let r = _mm_maskz_lzcnt_epi32(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -1147,7 +1172,7 @@ mod tests { } #[simd_test(enable = "avx512cd")] - unsafe fn test_mm512_lzcnt_epi64() { + const unsafe fn test_mm512_lzcnt_epi64() { let a = _mm512_set1_epi64(1); let r = _mm512_lzcnt_epi64(a); let e = _mm512_set1_epi64(63); @@ -1155,7 +1180,7 @@ mod tests { } #[simd_test(enable = "avx512cd")] - unsafe fn test_mm512_mask_lzcnt_epi64() { + const unsafe fn test_mm512_mask_lzcnt_epi64() { let a = _mm512_set1_epi64(1); let r = _mm512_mask_lzcnt_epi64(a, 0, a); assert_eq_m512i(r, a); @@ -1165,7 +1190,7 @@ mod tests { } #[simd_test(enable = "avx512cd")] - unsafe fn test_mm512_maskz_lzcnt_epi64() { + const unsafe fn test_mm512_maskz_lzcnt_epi64() { let a = _mm512_set1_epi64(2); let r = _mm512_maskz_lzcnt_epi64(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -1175,7 
+1200,7 @@ mod tests { } #[simd_test(enable = "avx512cd,avx512vl")] - unsafe fn test_mm256_lzcnt_epi64() { + const unsafe fn test_mm256_lzcnt_epi64() { let a = _mm256_set1_epi64x(1); let r = _mm256_lzcnt_epi64(a); let e = _mm256_set1_epi64x(63); @@ -1183,7 +1208,7 @@ mod tests { } #[simd_test(enable = "avx512cd,avx512vl")] - unsafe fn test_mm256_mask_lzcnt_epi64() { + const unsafe fn test_mm256_mask_lzcnt_epi64() { let a = _mm256_set1_epi64x(1); let r = _mm256_mask_lzcnt_epi64(a, 0, a); assert_eq_m256i(r, a); @@ -1193,7 +1218,7 @@ mod tests { } #[simd_test(enable = "avx512cd,avx512vl")] - unsafe fn test_mm256_maskz_lzcnt_epi64() { + const unsafe fn test_mm256_maskz_lzcnt_epi64() { let a = _mm256_set1_epi64x(1); let r = _mm256_maskz_lzcnt_epi64(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -1203,7 +1228,7 @@ mod tests { } #[simd_test(enable = "avx512cd,avx512vl")] - unsafe fn test_mm_lzcnt_epi64() { + const unsafe fn test_mm_lzcnt_epi64() { let a = _mm_set1_epi64x(1); let r = _mm_lzcnt_epi64(a); let e = _mm_set1_epi64x(63); @@ -1211,7 +1236,7 @@ mod tests { } #[simd_test(enable = "avx512cd,avx512vl")] - unsafe fn test_mm_mask_lzcnt_epi64() { + const unsafe fn test_mm_mask_lzcnt_epi64() { let a = _mm_set1_epi64x(1); let r = _mm_mask_lzcnt_epi64(a, 0, a); assert_eq_m128i(r, a); @@ -1221,7 +1246,7 @@ mod tests { } #[simd_test(enable = "avx512cd,avx512vl")] - unsafe fn test_mm_maskz_lzcnt_epi64() { + const unsafe fn test_mm_maskz_lzcnt_epi64() { let a = _mm_set1_epi64x(1); let r = _mm_maskz_lzcnt_epi64(0, a); assert_eq_m128i(r, _mm_setzero_si128()); diff --git a/crates/core_arch/src/x86/avx512dq.rs b/crates/core_arch/src/x86/avx512dq.rs index a691687502..4ab5880c55 100644 --- a/crates/core_arch/src/x86/avx512dq.rs +++ b/crates/core_arch/src/x86/avx512dq.rs @@ -15,7 +15,8 @@ use crate::{ #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vandpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_and_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_and_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let and = _mm_and_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, and, src.as_f64x2())) @@ -30,7 +31,8 @@ pub fn _mm_mask_and_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vandpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_maskz_and_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_and_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let and = _mm_and_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, and, f64x2::ZERO)) @@ -46,7 +48,8 @@ pub fn _mm_maskz_and_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vandpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_and_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_and_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let and = _mm256_and_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, and, src.as_f64x4())) @@ -61,7 +64,8 @@ pub fn 
_mm256_mask_and_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vandpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_and_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_and_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let and = _mm256_and_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, and, f64x4::ZERO)) @@ -76,7 +80,8 @@ pub fn _mm256_maskz_and_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vandp))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_and_pd(a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_and_pd(a: __m512d, b: __m512d) -> __m512d { unsafe { transmute(simd_and(transmute::<_, u64x8>(a), transmute::<_, u64x8>(b))) } } @@ -89,7 +94,8 @@ pub fn _mm512_and_pd(a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vandpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_and_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_and_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let and = _mm512_and_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, and, src.as_f64x8())) @@ -104,7 +110,8 @@ pub fn _mm512_mask_and_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vandpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_and_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_and_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let and = _mm512_and_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, and, f64x8::ZERO)) @@ -120,7 +127,8 @@ pub fn _mm512_maskz_and_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vandps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_and_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_and_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let and = _mm_and_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, and, src.as_f32x4())) @@ -135,7 +143,8 @@ pub fn _mm_mask_and_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vandps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_maskz_and_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_and_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let and = _mm_and_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, and, f32x4::ZERO)) @@ -151,7 +160,8 @@ pub fn _mm_maskz_and_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, 
assert_instr(vandps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_and_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_and_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let and = _mm256_and_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, and, src.as_f32x8())) @@ -166,7 +176,8 @@ pub fn _mm256_mask_and_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vandps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_and_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_and_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let and = _mm256_and_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, and, f32x8::ZERO)) @@ -181,7 +192,8 @@ pub fn _mm256_maskz_and_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vandps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_and_ps(a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_and_ps(a: __m512, b: __m512) -> __m512 { unsafe { transmute(simd_and( transmute::<_, u32x16>(a), @@ -199,7 +211,8 @@ pub fn _mm512_and_ps(a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vandps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_and_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_and_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let and = _mm512_and_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, and, src.as_f32x16())) @@ -214,7 +227,8 @@ pub fn _mm512_mask_and_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __ #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vandps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_and_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_and_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let and = _mm512_and_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, and, f32x16::ZERO)) @@ -232,7 +246,8 @@ pub fn _mm512_maskz_and_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vandnpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_andnot_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_andnot_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let andnot = _mm_andnot_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, andnot, src.as_f64x2())) @@ -248,7 +263,8 @@ pub fn _mm_mask_andnot_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vandnpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_maskz_andnot_pd(k: __mmask8, 
a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_andnot_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let andnot = _mm_andnot_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, andnot, f64x2::ZERO)) @@ -264,7 +280,8 @@ pub fn _mm_maskz_andnot_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vandnpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_andnot_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_andnot_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let andnot = _mm256_andnot_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, andnot, src.as_f64x4())) @@ -280,7 +297,8 @@ pub fn _mm256_mask_andnot_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vandnpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_andnot_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_andnot_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let andnot = _mm256_andnot_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, andnot, f64x4::ZERO)) @@ -295,7 +313,8 @@ pub fn _mm256_maskz_andnot_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vandnp))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_andnot_pd(a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_andnot_pd(a: __m512d, b: __m512d) -> __m512d { unsafe { _mm512_and_pd(_mm512_xor_pd(a, transmute(_mm512_set1_epi64(-1))), b) } } @@ -308,7 +327,8 @@ pub fn _mm512_andnot_pd(a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vandnpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_andnot_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_andnot_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let andnot = _mm512_andnot_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, andnot, src.as_f64x8())) @@ -324,7 +344,8 @@ pub fn _mm512_mask_andnot_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vandnpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_andnot_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_andnot_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let andnot = _mm512_andnot_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, andnot, f64x8::ZERO)) @@ -340,7 +361,8 @@ pub fn _mm512_maskz_andnot_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vandnps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_andnot_ps(src: __m128, k: __mmask8, a: 
__m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_andnot_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let andnot = _mm_andnot_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, andnot, src.as_f32x4())) @@ -356,7 +378,8 @@ pub fn _mm_mask_andnot_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vandnps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_maskz_andnot_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_andnot_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let andnot = _mm_andnot_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, andnot, f32x4::ZERO)) @@ -372,7 +395,8 @@ pub fn _mm_maskz_andnot_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vandnps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_andnot_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_andnot_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let andnot = _mm256_andnot_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, andnot, src.as_f32x8())) @@ -388,7 +412,8 @@ pub fn _mm256_mask_andnot_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vandnps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_andnot_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_andnot_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let andnot = _mm256_andnot_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, andnot, f32x8::ZERO)) @@ -403,7 +428,8 @@ pub fn _mm256_maskz_andnot_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vandnps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_andnot_ps(a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_andnot_ps(a: __m512, b: __m512) -> __m512 { unsafe { _mm512_and_ps(_mm512_xor_ps(a, transmute(_mm512_set1_epi32(-1))), b) } } @@ -416,7 +442,8 @@ pub fn _mm512_andnot_ps(a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vandnps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_andnot_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_andnot_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let andnot = _mm512_andnot_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, andnot, src.as_f32x16())) @@ -432,7 +459,8 @@ pub fn _mm512_mask_andnot_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vandnps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_andnot_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { 
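Every mask_/maskz_ intrinsic touched in this file follows the same shape as the and/andnot pairs above: compute the unmasked result, then let simd_select_bitmask pick, lane by lane, between that result and either src (mask_ variants) or zero (maskz_ variants). A minimal scalar sketch of that per-lane selection, using hypothetical helper names that are not part of this diff:

fn mask_lane(k: u8, lane: u32, result: f32, src: f32) -> f32 {
    // mask_ variants keep the src lane where the mask bit is clear
    if (k >> lane) & 1 == 1 { result } else { src }
}

fn maskz_lane(k: u8, lane: u32, result: f32) -> f32 {
    // maskz_ variants zero the lane where the mask bit is clear
    if (k >> lane) & 1 == 1 { result } else { 0.0 }
}

Under this patch the same per-lane selection is what the const interpreter evaluates; only the wrapped operation had to become const for the whole intrinsic to be usable in const contexts.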
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_andnot_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let andnot = _mm512_andnot_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, andnot, f32x16::ZERO)) @@ -450,7 +478,8 @@ pub fn _mm512_maskz_andnot_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vorpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_or_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_or_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let or = _mm_or_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, or, src.as_f64x2())) @@ -465,7 +494,8 @@ pub fn _mm_mask_or_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m1 #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vorpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_maskz_or_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_or_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let or = _mm_or_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, or, f64x2::ZERO)) @@ -481,7 +511,8 @@ pub fn _mm_maskz_or_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vorpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_or_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_or_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let or = _mm256_or_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, or, src.as_f64x4())) @@ -496,7 +527,8 @@ pub fn _mm256_mask_or_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> _ #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vorpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_or_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_or_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let or = _mm256_or_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, or, f64x4::ZERO)) @@ -511,7 +543,8 @@ pub fn _mm256_maskz_or_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vorp))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_or_pd(a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_or_pd(a: __m512d, b: __m512d) -> __m512d { unsafe { transmute(simd_or(transmute::<_, u64x8>(a), transmute::<_, u64x8>(b))) } } @@ -524,7 +557,8 @@ pub fn _mm512_or_pd(a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vorpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_or_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_or_pd(src: 
__m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let or = _mm512_or_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, or, src.as_f64x8())) @@ -539,7 +573,8 @@ pub fn _mm512_mask_or_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> _ #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vorpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_or_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_or_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let or = _mm512_or_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, or, f64x8::ZERO)) @@ -555,7 +590,8 @@ pub fn _mm512_maskz_or_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vorps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_or_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_or_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let or = _mm_or_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, or, src.as_f32x4())) @@ -570,7 +606,8 @@ pub fn _mm_mask_or_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vorps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_maskz_or_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_or_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let or = _mm_or_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, or, f32x4::ZERO)) @@ -586,7 +623,8 @@ pub fn _mm_maskz_or_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vorps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_or_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_or_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let or = _mm256_or_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, or, src.as_f32x8())) @@ -601,7 +639,8 @@ pub fn _mm256_mask_or_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m2 #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vorps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_or_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_or_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let or = _mm256_or_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, or, f32x8::ZERO)) @@ -616,7 +655,8 @@ pub fn _mm256_maskz_or_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vorps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_or_ps(a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_or_ps(a: __m512, b: __m512) -> __m512 { unsafe { transmute(simd_or( transmute::<_, u32x16>(a), @@ -634,7 +674,8 @@ pub fn 
_mm512_or_ps(a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vorps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_or_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_or_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let or = _mm512_or_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, or, src.as_f32x16())) @@ -649,7 +690,8 @@ pub fn _mm512_mask_or_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vorps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_or_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_or_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let or = _mm512_or_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, or, f32x16::ZERO)) @@ -667,7 +709,8 @@ pub fn _mm512_maskz_or_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vxorpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_xor_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_xor_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let xor = _mm_xor_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, xor, src.as_f64x2())) @@ -682,7 +725,8 @@ pub fn _mm_mask_xor_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vxorpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_maskz_xor_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_xor_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let xor = _mm_xor_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, xor, f64x2::ZERO)) @@ -698,7 +742,8 @@ pub fn _mm_maskz_xor_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vxorpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_xor_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_xor_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let xor = _mm256_xor_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, xor, src.as_f64x4())) @@ -713,7 +758,8 @@ pub fn _mm256_mask_xor_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vxorpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_xor_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_xor_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let xor = _mm256_xor_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, xor, f64x4::ZERO)) @@ -728,7 +774,8 @@ pub fn _mm256_maskz_xor_pd(k: __mmask8, a: __m256d, b: 
__m256d) -> __m256d { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vxorp))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_xor_pd(a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_xor_pd(a: __m512d, b: __m512d) -> __m512d { unsafe { transmute(simd_xor(transmute::<_, u64x8>(a), transmute::<_, u64x8>(b))) } } @@ -741,7 +788,8 @@ pub fn _mm512_xor_pd(a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vxorpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_xor_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_xor_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let xor = _mm512_xor_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, xor, src.as_f64x8())) @@ -756,7 +804,8 @@ pub fn _mm512_mask_xor_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vxorpd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_xor_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_xor_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let xor = _mm512_xor_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, xor, f64x8::ZERO)) @@ -772,7 +821,8 @@ pub fn _mm512_maskz_xor_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vxorps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_xor_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_xor_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let xor = _mm_xor_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, xor, src.as_f32x4())) @@ -787,7 +837,8 @@ pub fn _mm_mask_xor_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vxorps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_maskz_xor_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_xor_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let xor = _mm_xor_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, xor, f32x4::ZERO)) @@ -803,7 +854,8 @@ pub fn _mm_maskz_xor_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vxorps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_xor_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_xor_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let xor = _mm256_xor_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, xor, src.as_f32x8())) @@ -818,7 +870,8 @@ pub fn _mm256_mask_xor_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vxorps))] 
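The unmasked float ops here (_mm512_xor_pd above, and the or/and variants earlier) are pure bit-pattern operations: the vector is transmuted to unsigned integer lanes, the bitwise op is applied, and the bits are reinterpreted as floats. A scalar sketch of one f64 lane, not part of the diff:

fn xor_pd_lane(a: f64, b: f64) -> f64 {
    // mirrors the transmute-to-u64x8 + simd_xor pattern above
    f64::from_bits(a.to_bits() ^ b.to_bits())
}

fn andnot_pd_lane(a: f64, b: f64) -> f64 {
    // andnot = and(xor(a, all-ones), b), i.e. (!a) & b on the raw bits
    f64::from_bits(!a.to_bits() & b.to_bits())
}

Since nothing here inspects the float values themselves, the same definitions hold for NaNs and carry over unchanged to the _ps variants on u32 lanes.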
#[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_xor_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_xor_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let xor = _mm256_xor_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, xor, f32x8::ZERO)) @@ -833,7 +886,8 @@ pub fn _mm256_maskz_xor_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vxorps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_xor_ps(a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_xor_ps(a: __m512, b: __m512) -> __m512 { unsafe { transmute(simd_xor( transmute::<_, u32x16>(a), @@ -851,7 +905,8 @@ pub fn _mm512_xor_ps(a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vxorps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_xor_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_xor_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let xor = _mm512_xor_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, xor, src.as_f32x16())) @@ -866,7 +921,8 @@ pub fn _mm512_mask_xor_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __ #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vxorps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_xor_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_xor_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let xor = _mm512_xor_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, xor, f32x16::ZERO)) @@ -882,7 +938,8 @@ pub fn _mm512_maskz_xor_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_broadcast_f32x2(a: __m128) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_broadcast_f32x2(a: __m128) -> __m256 { unsafe { let b: f32x8 = simd_shuffle!(a, a, [0, 1, 0, 1, 0, 1, 0, 1]); transmute(b) @@ -897,7 +954,8 @@ pub fn _mm256_broadcast_f32x2(a: __m128) -> __m256 { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vbroadcastf32x2))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_broadcast_f32x2(src: __m256, k: __mmask8, a: __m128) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_broadcast_f32x2(src: __m256, k: __mmask8, a: __m128) -> __m256 { unsafe { let b = _mm256_broadcast_f32x2(a).as_f32x8(); transmute(simd_select_bitmask(k, b, src.as_f32x8())) @@ -912,7 +970,8 @@ pub fn _mm256_mask_broadcast_f32x2(src: __m256, k: __mmask8, a: __m128) -> __m25 #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vbroadcastf32x2))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_broadcast_f32x2(k: __mmask8, a: __m128) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_broadcast_f32x2(k: __mmask8, a: 
__m128) -> __m256 { unsafe { let b = _mm256_broadcast_f32x2(a).as_f32x8(); transmute(simd_select_bitmask(k, b, f32x8::ZERO)) @@ -926,7 +985,8 @@ pub fn _mm256_maskz_broadcast_f32x2(k: __mmask8, a: __m128) -> __m256 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_broadcast_f32x2(a: __m128) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_broadcast_f32x2(a: __m128) -> __m512 { unsafe { let b: f32x16 = simd_shuffle!(a, a, [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]); transmute(b) @@ -941,7 +1001,8 @@ pub fn _mm512_broadcast_f32x2(a: __m128) -> __m512 { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vbroadcastf32x2))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_broadcast_f32x2(src: __m512, k: __mmask16, a: __m128) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_broadcast_f32x2(src: __m512, k: __mmask16, a: __m128) -> __m512 { unsafe { let b = _mm512_broadcast_f32x2(a).as_f32x16(); transmute(simd_select_bitmask(k, b, src.as_f32x16())) @@ -956,7 +1017,8 @@ pub fn _mm512_mask_broadcast_f32x2(src: __m512, k: __mmask16, a: __m128) -> __m5 #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vbroadcastf32x2))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_broadcast_f32x2(k: __mmask16, a: __m128) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_broadcast_f32x2(k: __mmask16, a: __m128) -> __m512 { unsafe { let b = _mm512_broadcast_f32x2(a).as_f32x16(); transmute(simd_select_bitmask(k, b, f32x16::ZERO)) @@ -970,7 +1032,8 @@ pub fn _mm512_maskz_broadcast_f32x2(k: __mmask16, a: __m128) -> __m512 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_broadcast_f32x8(a: __m256) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_broadcast_f32x8(a: __m256) -> __m512 { unsafe { let b: f32x16 = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7]); transmute(b) @@ -984,7 +1047,8 @@ pub fn _mm512_broadcast_f32x8(a: __m256) -> __m512 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_broadcast_f32x8(src: __m512, k: __mmask16, a: __m256) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_broadcast_f32x8(src: __m512, k: __mmask16, a: __m256) -> __m512 { unsafe { let b = _mm512_broadcast_f32x8(a).as_f32x16(); transmute(simd_select_bitmask(k, b, src.as_f32x16())) @@ -998,7 +1062,8 @@ pub fn _mm512_mask_broadcast_f32x8(src: __m512, k: __mmask16, a: __m256) -> __m5 #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_broadcast_f32x8(k: __mmask16, a: __m256) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_broadcast_f32x8(k: __mmask16, a: __m256) -> __m512 { unsafe { let b = _mm512_broadcast_f32x8(a).as_f32x16(); transmute(simd_select_bitmask(k, b, f32x16::ZERO)) @@ -1012,7 +1077,8 @@ pub fn _mm512_maskz_broadcast_f32x8(k: __mmask16, a: __m256) -> __m512 { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[stable(feature = 
"stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_broadcast_f64x2(a: __m128d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_broadcast_f64x2(a: __m128d) -> __m256d { unsafe { let b: f64x4 = simd_shuffle!(a, a, [0, 1, 0, 1]); transmute(b) @@ -1026,7 +1092,8 @@ pub fn _mm256_broadcast_f64x2(a: __m128d) -> __m256d { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_broadcast_f64x2(src: __m256d, k: __mmask8, a: __m128d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_broadcast_f64x2(src: __m256d, k: __mmask8, a: __m128d) -> __m256d { unsafe { let b = _mm256_broadcast_f64x2(a).as_f64x4(); transmute(simd_select_bitmask(k, b, src.as_f64x4())) @@ -1040,7 +1107,8 @@ pub fn _mm256_mask_broadcast_f64x2(src: __m256d, k: __mmask8, a: __m128d) -> __m #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_broadcast_f64x2(k: __mmask8, a: __m128d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_broadcast_f64x2(k: __mmask8, a: __m128d) -> __m256d { unsafe { let b = _mm256_broadcast_f64x2(a).as_f64x4(); transmute(simd_select_bitmask(k, b, f64x4::ZERO)) @@ -1054,7 +1122,8 @@ pub fn _mm256_maskz_broadcast_f64x2(k: __mmask8, a: __m128d) -> __m256d { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_broadcast_f64x2(a: __m128d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_broadcast_f64x2(a: __m128d) -> __m512d { unsafe { let b: f64x8 = simd_shuffle!(a, a, [0, 1, 0, 1, 0, 1, 0, 1]); transmute(b) @@ -1068,7 +1137,8 @@ pub fn _mm512_broadcast_f64x2(a: __m128d) -> __m512d { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_broadcast_f64x2(src: __m512d, k: __mmask8, a: __m128d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_broadcast_f64x2(src: __m512d, k: __mmask8, a: __m128d) -> __m512d { unsafe { let b = _mm512_broadcast_f64x2(a).as_f64x8(); transmute(simd_select_bitmask(k, b, src.as_f64x8())) @@ -1082,7 +1152,8 @@ pub fn _mm512_mask_broadcast_f64x2(src: __m512d, k: __mmask8, a: __m128d) -> __m #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_broadcast_f64x2(k: __mmask8, a: __m128d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_broadcast_f64x2(k: __mmask8, a: __m128d) -> __m512d { unsafe { let b = _mm512_broadcast_f64x2(a).as_f64x8(); transmute(simd_select_bitmask(k, b, f64x8::ZERO)) @@ -1095,7 +1166,8 @@ pub fn _mm512_maskz_broadcast_f64x2(k: __mmask8, a: __m128d) -> __m512d { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_broadcast_i32x2(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_broadcast_i32x2(a: __m128i) -> __m128i { unsafe { let a = a.as_i32x4(); let b: i32x4 = simd_shuffle!(a, a, [0, 1, 0, 1]); @@ -1111,7 +1183,8 @@ pub fn _mm_broadcast_i32x2(a: __m128i) -> __m128i { 
#[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vbroadcasti32x2))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_broadcast_i32x2(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_broadcast_i32x2(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let b = _mm_broadcast_i32x2(a).as_i32x4(); transmute(simd_select_bitmask(k, b, src.as_i32x4())) @@ -1126,7 +1199,8 @@ pub fn _mm_mask_broadcast_i32x2(src: __m128i, k: __mmask8, a: __m128i) -> __m128 #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vbroadcasti32x2))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_maskz_broadcast_i32x2(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_broadcast_i32x2(k: __mmask8, a: __m128i) -> __m128i { unsafe { let b = _mm_broadcast_i32x2(a).as_i32x4(); transmute(simd_select_bitmask(k, b, i32x4::ZERO)) @@ -1139,7 +1213,8 @@ pub fn _mm_maskz_broadcast_i32x2(k: __mmask8, a: __m128i) -> __m128i { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_broadcast_i32x2(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_broadcast_i32x2(a: __m128i) -> __m256i { unsafe { let a = a.as_i32x4(); let b: i32x8 = simd_shuffle!(a, a, [0, 1, 0, 1, 0, 1, 0, 1]); @@ -1155,7 +1230,8 @@ pub fn _mm256_broadcast_i32x2(a: __m128i) -> __m256i { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vbroadcasti32x2))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_broadcast_i32x2(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_broadcast_i32x2(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { unsafe { let b = _mm256_broadcast_i32x2(a).as_i32x8(); transmute(simd_select_bitmask(k, b, src.as_i32x8())) @@ -1170,7 +1246,8 @@ pub fn _mm256_mask_broadcast_i32x2(src: __m256i, k: __mmask8, a: __m128i) -> __m #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vbroadcasti32x2))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_broadcast_i32x2(k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_broadcast_i32x2(k: __mmask8, a: __m128i) -> __m256i { unsafe { let b = _mm256_broadcast_i32x2(a).as_i32x8(); transmute(simd_select_bitmask(k, b, i32x8::ZERO)) @@ -1183,7 +1260,8 @@ pub fn _mm256_maskz_broadcast_i32x2(k: __mmask8, a: __m128i) -> __m256i { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_broadcast_i32x2(a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_broadcast_i32x2(a: __m128i) -> __m512i { unsafe { let a = a.as_i32x4(); let b: i32x16 = simd_shuffle!(a, a, [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]); @@ -1199,7 +1277,8 @@ pub fn _mm512_broadcast_i32x2(a: __m128i) -> __m512i { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vbroadcasti32x2))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_broadcast_i32x2(src: 
__m512i, k: __mmask16, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_broadcast_i32x2(src: __m512i, k: __mmask16, a: __m128i) -> __m512i { unsafe { let b = _mm512_broadcast_i32x2(a).as_i32x16(); transmute(simd_select_bitmask(k, b, src.as_i32x16())) @@ -1214,7 +1293,8 @@ pub fn _mm512_mask_broadcast_i32x2(src: __m512i, k: __mmask16, a: __m128i) -> __ #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vbroadcasti32x2))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_broadcast_i32x2(k: __mmask16, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_broadcast_i32x2(k: __mmask16, a: __m128i) -> __m512i { unsafe { let b = _mm512_broadcast_i32x2(a).as_i32x16(); transmute(simd_select_bitmask(k, b, i32x16::ZERO)) @@ -1227,7 +1307,8 @@ pub fn _mm512_maskz_broadcast_i32x2(k: __mmask16, a: __m128i) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_broadcast_i32x8(a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_broadcast_i32x8(a: __m256i) -> __m512i { unsafe { let a = a.as_i32x8(); let b: i32x16 = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7]); @@ -1242,7 +1323,8 @@ pub fn _mm512_broadcast_i32x8(a: __m256i) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_broadcast_i32x8(src: __m512i, k: __mmask16, a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_broadcast_i32x8(src: __m512i, k: __mmask16, a: __m256i) -> __m512i { unsafe { let b = _mm512_broadcast_i32x8(a).as_i32x16(); transmute(simd_select_bitmask(k, b, src.as_i32x16())) @@ -1256,7 +1338,8 @@ pub fn _mm512_mask_broadcast_i32x8(src: __m512i, k: __mmask16, a: __m256i) -> __ #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_broadcast_i32x8(k: __mmask16, a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_broadcast_i32x8(k: __mmask16, a: __m256i) -> __m512i { unsafe { let b = _mm512_broadcast_i32x8(a).as_i32x16(); transmute(simd_select_bitmask(k, b, i32x16::ZERO)) @@ -1269,7 +1352,8 @@ pub fn _mm512_maskz_broadcast_i32x8(k: __mmask16, a: __m256i) -> __m512i { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_broadcast_i64x2(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_broadcast_i64x2(a: __m128i) -> __m256i { unsafe { let a = a.as_i64x2(); let b: i64x4 = simd_shuffle!(a, a, [0, 1, 0, 1]); @@ -1284,7 +1368,8 @@ pub fn _mm256_broadcast_i64x2(a: __m128i) -> __m256i { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_broadcast_i64x2(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_broadcast_i64x2(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { unsafe { let b = _mm256_broadcast_i64x2(a).as_i64x4(); transmute(simd_select_bitmask(k, 
b, src.as_i64x4())) @@ -1298,7 +1383,8 @@ pub fn _mm256_mask_broadcast_i64x2(src: __m256i, k: __mmask8, a: __m128i) -> __m #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_broadcast_i64x2(k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_broadcast_i64x2(k: __mmask8, a: __m128i) -> __m256i { unsafe { let b = _mm256_broadcast_i64x2(a).as_i64x4(); transmute(simd_select_bitmask(k, b, i64x4::ZERO)) @@ -1311,7 +1397,8 @@ pub fn _mm256_maskz_broadcast_i64x2(k: __mmask8, a: __m128i) -> __m256i { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_broadcast_i64x2(a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_broadcast_i64x2(a: __m128i) -> __m512i { unsafe { let a = a.as_i64x2(); let b: i64x8 = simd_shuffle!(a, a, [0, 1, 0, 1, 0, 1, 0, 1]); @@ -1326,7 +1413,8 @@ pub fn _mm512_broadcast_i64x2(a: __m128i) -> __m512i { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_broadcast_i64x2(src: __m512i, k: __mmask8, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_broadcast_i64x2(src: __m512i, k: __mmask8, a: __m128i) -> __m512i { unsafe { let b = _mm512_broadcast_i64x2(a).as_i64x8(); transmute(simd_select_bitmask(k, b, src.as_i64x8())) @@ -1340,7 +1428,8 @@ pub fn _mm512_mask_broadcast_i64x2(src: __m512i, k: __mmask8, a: __m128i) -> __m #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_broadcast_i64x2(k: __mmask8, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_broadcast_i64x2(k: __mmask8, a: __m128i) -> __m512i { unsafe { let b = _mm512_broadcast_i64x2(a).as_i64x8(); transmute(simd_select_bitmask(k, b, i64x8::ZERO)) @@ -1357,7 +1446,8 @@ pub fn _mm512_maskz_broadcast_i64x2(k: __mmask8, a: __m128i) -> __m512i { #[target_feature(enable = "avx512dq")] #[rustc_legacy_const_generics(1)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_extractf32x8_ps<const IMM8: i32>(a: __m512) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_extractf32x8_ps<const IMM8: i32>(a: __m512) -> __m256 { unsafe { static_assert_uimm_bits!(IMM8, 1); match IMM8 & 1 { @@ -1377,7 +1467,12 @@ pub fn _mm512_extractf32x8_ps(a: __m512) -> __m256 { #[cfg_attr(test, assert_instr(vextractf32x8, IMM8 = 1))] #[rustc_legacy_const_generics(3)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_extractf32x8_ps<const IMM8: i32>(src: __m256, k: __mmask8, a: __m512) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_extractf32x8_ps<const IMM8: i32>( + src: __m256, + k: __mmask8, + a: __m512, +) -> __m256 { unsafe { static_assert_uimm_bits!(IMM8, 1); let b = _mm512_extractf32x8_ps::<IMM8>(a); @@ -1395,7 +1490,8 @@ pub fn _mm512_mask_extractf32x8_ps(src: __m256, k: __mmask8, a: #[cfg_attr(test, assert_instr(vextractf32x8, IMM8 = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_extractf32x8_ps<const IMM8: i32>(k: __mmask8, a: __m512) -> __m256 { +#[rustc_const_unstable(feature = "
"stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_extractf32x8_ps(k: __mmask8, a: __m512) -> __m256 { unsafe { static_assert_uimm_bits!(IMM8, 1); let b = _mm512_extractf32x8_ps::(a); @@ -1411,7 +1507,8 @@ pub fn _mm512_maskz_extractf32x8_ps(k: __mmask8, a: __m512) -> #[target_feature(enable = "avx512dq,avx512vl")] #[rustc_legacy_const_generics(1)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_extractf64x2_pd(a: __m256d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_extractf64x2_pd(a: __m256d) -> __m128d { unsafe { static_assert_uimm_bits!(IMM8, 1); match IMM8 & 1 { @@ -1431,7 +1528,8 @@ pub fn _mm256_extractf64x2_pd(a: __m256d) -> __m128d { #[cfg_attr(test, assert_instr(vextractf64x2, IMM8 = 1))] #[rustc_legacy_const_generics(3)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_extractf64x2_pd( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_extractf64x2_pd( src: __m128d, k: __mmask8, a: __m256d, @@ -1453,7 +1551,8 @@ pub fn _mm256_mask_extractf64x2_pd( #[cfg_attr(test, assert_instr(vextractf64x2, IMM8 = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_extractf64x2_pd(k: __mmask8, a: __m256d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_extractf64x2_pd(k: __mmask8, a: __m256d) -> __m128d { unsafe { static_assert_uimm_bits!(IMM8, 1); let b = _mm256_extractf64x2_pd::(a); @@ -1469,7 +1568,8 @@ pub fn _mm256_maskz_extractf64x2_pd(k: __mmask8, a: __m256d) -> #[target_feature(enable = "avx512dq")] #[rustc_legacy_const_generics(1)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_extractf64x2_pd(a: __m512d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_extractf64x2_pd(a: __m512d) -> __m128d { unsafe { static_assert_uimm_bits!(IMM8, 2); match IMM8 & 3 { @@ -1491,7 +1591,8 @@ pub fn _mm512_extractf64x2_pd(a: __m512d) -> __m128d { #[cfg_attr(test, assert_instr(vextractf64x2, IMM8 = 3))] #[rustc_legacy_const_generics(3)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_extractf64x2_pd( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_extractf64x2_pd( src: __m128d, k: __mmask8, a: __m512d, @@ -1513,7 +1614,8 @@ pub fn _mm512_mask_extractf64x2_pd( #[cfg_attr(test, assert_instr(vextractf64x2, IMM8 = 3))] #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_extractf64x2_pd(k: __mmask8, a: __m512d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_extractf64x2_pd(k: __mmask8, a: __m512d) -> __m128d { unsafe { static_assert_uimm_bits!(IMM8, 2); let b = _mm512_extractf64x2_pd::(a).as_f64x2(); @@ -1529,7 +1631,8 @@ pub fn _mm512_maskz_extractf64x2_pd(k: __mmask8, a: __m512d) -> #[target_feature(enable = "avx512dq")] #[rustc_legacy_const_generics(1)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_extracti32x8_epi32(a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_extracti32x8_epi32(a: __m512i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 1); let a = a.as_i32x16(); @@ -1550,7 +1653,8 @@ pub fn 
_mm512_extracti32x8_epi32(a: __m512i) -> __m256i { #[cfg_attr(test, assert_instr(vextracti32x8, IMM8 = 1))] #[rustc_legacy_const_generics(3)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_extracti32x8_epi32<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_extracti32x8_epi32<const IMM8: i32>( src: __m256i, k: __mmask8, a: __m512i, @@ -1571,7 +1675,8 @@ pub fn _mm512_mask_extracti32x8_epi32( #[cfg_attr(test, assert_instr(vextracti32x8, IMM8 = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_extracti32x8_epi32<const IMM8: i32>(k: __mmask8, a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_extracti32x8_epi32<const IMM8: i32>(k: __mmask8, a: __m512i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 1); let b = _mm512_extracti32x8_epi32::<IMM8>(a).as_i32x8(); @@ -1587,7 +1692,8 @@ pub fn _mm512_maskz_extracti32x8_epi32(k: __mmask8, a: __m512i) #[target_feature(enable = "avx512dq,avx512vl")] #[rustc_legacy_const_generics(1)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_extracti64x2_epi64<const IMM8: i32>(a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_extracti64x2_epi64<const IMM8: i32>(a: __m256i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 1); let a = a.as_i64x4(); @@ -1607,7 +1713,8 @@ pub fn _mm256_extracti64x2_epi64(a: __m256i) -> __m128i { #[cfg_attr(test, assert_instr(vextracti64x2, IMM8 = 1))] #[rustc_legacy_const_generics(3)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_extracti64x2_epi64<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_extracti64x2_epi64<const IMM8: i32>( src: __m128i, k: __mmask8, a: __m256i, @@ -1628,7 +1735,8 @@ pub fn _mm256_mask_extracti64x2_epi64( #[cfg_attr(test, assert_instr(vextracti64x2, IMM8 = 1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_extracti64x2_epi64<const IMM8: i32>(k: __mmask8, a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_extracti64x2_epi64<const IMM8: i32>(k: __mmask8, a: __m256i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 1); let b = _mm256_extracti64x2_epi64::<IMM8>(a).as_i64x2(); @@ -1644,7 +1752,8 @@ pub fn _mm256_maskz_extracti64x2_epi64(k: __mmask8, a: __m256i) #[target_feature(enable = "avx512dq")] #[rustc_legacy_const_generics(1)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_extracti64x2_epi64<const IMM8: i32>(a: __m512i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_extracti64x2_epi64<const IMM8: i32>(a: __m512i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 2); let a = a.as_i64x8(); @@ -1666,7 +1775,8 @@ pub fn _mm512_extracti64x2_epi64(a: __m512i) -> __m128i { #[cfg_attr(test, assert_instr(vextracti64x2, IMM8 = 3))] #[rustc_legacy_const_generics(3)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_extracti64x2_epi64<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_extracti64x2_epi64<const IMM8: i32>( src: __m128i, k: __mmask8, a: __m512i, @@ -1687,7 +1797,8 @@ pub fn _mm512_mask_extracti64x2_epi64( #[cfg_attr(test, assert_instr(vextracti64x2, IMM8 = 3))] #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn 
_mm512_maskz_extracti64x2_epi64<const IMM8: i32>(k: __mmask8, a: __m512i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_extracti64x2_epi64<const IMM8: i32>(k: __mmask8, a: __m512i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 2); let b = _mm512_extracti64x2_epi64::<IMM8>(a).as_i64x2(); @@ -1705,7 +1816,8 @@ pub fn _mm512_maskz_extracti64x2_epi64<const IMM8: i32>(k: __mmask8, a: __m512i) #[target_feature(enable = "avx512dq")] #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_insertf32x8<const IMM8: i32>(a: __m512, b: __m256) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_insertf32x8<const IMM8: i32>(a: __m512, b: __m256) -> __m512 { unsafe { static_assert_uimm_bits!(IMM8, 1); let b = _mm512_castps256_ps512(b); @@ -1738,7 +1850,8 @@ pub fn _mm512_insertf32x8<const IMM8: i32>(a: __m512, b: __m256) -> __m512 { #[cfg_attr(test, assert_instr(vinsertf32x8, IMM8 = 1))] #[rustc_legacy_const_generics(4)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_insertf32x8<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_insertf32x8<const IMM8: i32>( src: __m512, k: __mmask16, a: __m512, @@ -1761,7 +1874,12 @@ pub fn _mm512_mask_insertf32x8<const IMM8: i32>( #[cfg_attr(test, assert_instr(vinsertf32x8, IMM8 = 1))] #[rustc_legacy_const_generics(3)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_insertf32x8<const IMM8: i32>(k: __mmask16, a: __m512, b: __m256) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_insertf32x8<const IMM8: i32>( + k: __mmask16, + a: __m512, + b: __m256, +) -> __m512 { unsafe { static_assert_uimm_bits!(IMM8, 1); let c = _mm512_insertf32x8::<IMM8>(a, b).as_f32x16(); @@ -1777,7 +1895,8 @@ pub fn _mm512_maskz_insertf32x8<const IMM8: i32>(k: __mmask16, a: __m512, b: __m #[target_feature(enable = "avx512dq,avx512vl")] #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_insertf64x2<const IMM8: i32>(a: __m256d, b: __m128d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_insertf64x2<const IMM8: i32>(a: __m256d, b: __m128d) -> __m256d { unsafe { static_assert_uimm_bits!(IMM8, 1); let b = _mm256_castpd128_pd256(b); @@ -1798,7 +1917,8 @@ pub fn _mm256_insertf64x2<const IMM8: i32>(a: __m256d, b: __m128d) -> __m256d { #[cfg_attr(test, assert_instr(vinsertf64x2, IMM8 = 1))] #[rustc_legacy_const_generics(4)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_insertf64x2<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_insertf64x2<const IMM8: i32>( src: __m256d, k: __mmask8, a: __m256d, @@ -1821,7 +1941,12 @@ pub fn _mm256_mask_insertf64x2<const IMM8: i32>( #[cfg_attr(test, assert_instr(vinsertf64x2, IMM8 = 1))] #[rustc_legacy_const_generics(3)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_insertf64x2<const IMM8: i32>(k: __mmask8, a: __m256d, b: __m128d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_insertf64x2<const IMM8: i32>( + k: __mmask8, + a: __m256d, + b: __m128d, +) -> __m256d { unsafe { static_assert_uimm_bits!(IMM8, 1); let c = _mm256_insertf64x2::<IMM8>(a, b).as_f64x4(); @@ -1837,7 +1962,8 @@ pub fn _mm256_maskz_insertf64x2<const IMM8: i32>(k: __mmask8, a: __m256d, b: __m #[target_feature(enable = "avx512dq")] #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_insertf64x2<const IMM8: i32>(a: __m512d, b: __m128d) -> __m512d {
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_insertf64x2<const IMM8: i32>(a: __m512d, b: __m128d) -> __m512d { unsafe { static_assert_uimm_bits!(IMM8, 2); let b = _mm512_castpd128_pd512(b); @@ -1860,7 +1986,8 @@ pub fn _mm512_insertf64x2<const IMM8: i32>(a: __m512d, b: __m128d) -> __m512d { #[cfg_attr(test, assert_instr(vinsertf64x2, IMM8 = 3))] #[rustc_legacy_const_generics(4)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_insertf64x2<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_insertf64x2<const IMM8: i32>( src: __m512d, k: __mmask8, a: __m512d, @@ -1883,7 +2010,12 @@ pub fn _mm512_mask_insertf64x2<const IMM8: i32>( #[cfg_attr(test, assert_instr(vinsertf64x2, IMM8 = 3))] #[rustc_legacy_const_generics(3)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_insertf64x2<const IMM8: i32>(k: __mmask8, a: __m512d, b: __m128d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_insertf64x2<const IMM8: i32>( + k: __mmask8, + a: __m512d, + b: __m128d, +) -> __m512d { unsafe { static_assert_uimm_bits!(IMM8, 2); let c = _mm512_insertf64x2::<IMM8>(a, b).as_f64x8(); @@ -1899,7 +2031,8 @@ pub fn _mm512_maskz_insertf64x2<const IMM8: i32>(k: __mmask8, a: __m512d, b: __m #[target_feature(enable = "avx512dq")] #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_inserti32x8<const IMM8: i32>(a: __m512i, b: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_inserti32x8<const IMM8: i32>(a: __m512i, b: __m256i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 1); let a = a.as_i32x16(); @@ -1934,7 +2067,8 @@ pub fn _mm512_inserti32x8<const IMM8: i32>(a: __m512i, b: __m256i) -> __m512i { #[cfg_attr(test, assert_instr(vinserti32x8, IMM8 = 1))] #[rustc_legacy_const_generics(4)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_inserti32x8<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_inserti32x8<const IMM8: i32>( src: __m512i, k: __mmask16, a: __m512i, @@ -1957,7 +2091,12 @@ pub fn _mm512_mask_inserti32x8<const IMM8: i32>( #[cfg_attr(test, assert_instr(vinserti32x8, IMM8 = 1))] #[rustc_legacy_const_generics(3)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_inserti32x8<const IMM8: i32>(k: __mmask16, a: __m512i, b: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_inserti32x8<const IMM8: i32>( + k: __mmask16, + a: __m512i, + b: __m256i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 1); let c = _mm512_inserti32x8::<IMM8>(a, b).as_i32x16(); @@ -1973,7 +2112,8 @@ pub fn _mm512_maskz_inserti32x8<const IMM8: i32>(k: __mmask16, a: __m512i, b: __ #[target_feature(enable = "avx512dq,avx512vl")] #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_inserti64x2<const IMM8: i32>(a: __m256i, b: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_inserti64x2<const IMM8: i32>(a: __m256i, b: __m128i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 1); let a = a.as_i64x4(); @@ -1995,7 +2135,8 @@ pub fn _mm256_inserti64x2<const IMM8: i32>(a: __m256i, b: __m128i) -> __m256i { #[cfg_attr(test, assert_instr(vinserti64x2, IMM8 = 1))] #[rustc_legacy_const_generics(4)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_inserti64x2<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_inserti64x2<const IMM8: i32>( src: __m256i, k:
__mmask8, a: __m256i, @@ -2018,7 +2159,12 @@ pub fn _mm256_mask_inserti64x2<const IMM8: i32>( #[cfg_attr(test, assert_instr(vinserti64x2, IMM8 = 1))] #[rustc_legacy_const_generics(3)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_inserti64x2<const IMM8: i32>(k: __mmask8, a: __m256i, b: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_inserti64x2<const IMM8: i32>( + k: __mmask8, + a: __m256i, + b: __m128i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 1); let c = _mm256_inserti64x2::<IMM8>(a, b).as_i64x4(); @@ -2034,7 +2180,8 @@ pub fn _mm256_maskz_inserti64x2<const IMM8: i32>(k: __mmask8, a: __m256i, b: __m #[target_feature(enable = "avx512dq")] #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_inserti64x2<const IMM8: i32>(a: __m512i, b: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_inserti64x2<const IMM8: i32>(a: __m512i, b: __m128i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 2); let a = a.as_i64x8(); @@ -2058,7 +2205,8 @@ pub fn _mm512_inserti64x2<const IMM8: i32>(a: __m512i, b: __m128i) -> __m512i { #[cfg_attr(test, assert_instr(vinserti64x2, IMM8 = 3))] #[rustc_legacy_const_generics(4)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_inserti64x2<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_inserti64x2<const IMM8: i32>( src: __m512i, k: __mmask8, a: __m512i, @@ -2081,7 +2229,12 @@ pub fn _mm512_mask_inserti64x2<const IMM8: i32>( #[cfg_attr(test, assert_instr(vinserti64x2, IMM8 = 3))] #[rustc_legacy_const_generics(3)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_inserti64x2<const IMM8: i32>(k: __mmask8, a: __m512i, b: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_inserti64x2<const IMM8: i32>( + k: __mmask8, + a: __m512i, + b: __m128i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 2); let c = _mm512_inserti64x2::<IMM8>(a, b).as_i64x8(); @@ -4333,7 +4486,8 @@ pub fn _mm512_maskz_cvttps_epu64(k: __mmask8, a: __m256) -> __m512i { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vpmullq))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mullo_epi64(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mullo_epi64(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_mul(a.as_i64x2(), b.as_i64x2())) } } @@ -4346,7 +4500,8 @@ pub fn _mm_mullo_epi64(a: __m128i, b: __m128i) -> __m128i { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vpmullq))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_mask_mullo_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mullo_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let b = _mm_mullo_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, b, src.as_i64x2())) @@ -4362,7 +4517,8 @@ pub fn _mm_mask_mullo_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) - #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vpmullq))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_maskz_mullo_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn
_mm_maskz_mullo_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let b = _mm_mullo_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, b, i64x2::ZERO)) @@ -4377,7 +4533,8 @@ pub fn _mm_maskz_mullo_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vpmullq))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mullo_epi64(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mullo_epi64(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_mul(a.as_i64x4(), b.as_i64x4())) } } @@ -4390,7 +4547,8 @@ pub fn _mm256_mullo_epi64(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vpmullq))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_mullo_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_mullo_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let b = _mm256_mullo_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, b, src.as_i64x4())) @@ -4406,7 +4564,8 @@ pub fn _mm256_mask_mullo_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vpmullq))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_mullo_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_mullo_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let b = _mm256_mullo_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, b, i64x4::ZERO)) @@ -4421,7 +4580,8 @@ pub fn _mm256_maskz_mullo_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vpmullq))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mullo_epi64(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mullo_epi64(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_mul(a.as_i64x8(), b.as_i64x8())) } } @@ -4434,7 +4594,8 @@ pub fn _mm512_mullo_epi64(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vpmullq))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_mullo_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_mullo_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let b = _mm512_mullo_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, b, src.as_i64x8())) @@ -4450,7 +4611,8 @@ pub fn _mm512_mask_mullo_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vpmullq))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_mullo_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_mullo_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let b = _mm512_mullo_epi64(a, b).as_i64x8(); 
transmute(simd_select_bitmask(k, b, i64x8::ZERO)) @@ -4465,7 +4627,8 @@ pub fn _mm512_maskz_mullo_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _cvtmask8_u32(a: __mmask8) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _cvtmask8_u32(a: __mmask8) -> u32 { a as u32 } @@ -4475,7 +4638,8 @@ pub fn _cvtmask8_u32(a: __mmask8) -> u32 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _cvtu32_mask8(a: u32) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _cvtu32_mask8(a: u32) -> __mmask8 { a as __mmask8 } @@ -4485,7 +4649,8 @@ pub fn _cvtu32_mask8(a: u32) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kadd_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kadd_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { a.wrapping_add(b) } @@ -4495,7 +4660,8 @@ pub fn _kadd_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kadd_mask8(a: __mmask8, b: __mmask8) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kadd_mask8(a: __mmask8, b: __mmask8) -> __mmask8 { a.wrapping_add(b) } @@ -4505,7 +4671,8 @@ pub fn _kadd_mask8(a: __mmask8, b: __mmask8) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kand_mask8(a: __mmask8, b: __mmask8) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kand_mask8(a: __mmask8, b: __mmask8) -> __mmask8 { a & b } @@ -4515,7 +4682,8 @@ pub fn _kand_mask8(a: __mmask8, b: __mmask8) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kandn_mask8(a: __mmask8, b: __mmask8) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kandn_mask8(a: __mmask8, b: __mmask8) -> __mmask8 { _knot_mask8(a) & b } @@ -4525,7 +4693,8 @@ pub fn _kandn_mask8(a: __mmask8, b: __mmask8) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _knot_mask8(a: __mmask8) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _knot_mask8(a: __mmask8) -> __mmask8 { a ^ 0b11111111 } @@ -4535,7 +4704,8 @@ pub fn _knot_mask8(a: __mmask8) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kor_mask8(a: __mmask8, b: __mmask8) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kor_mask8(a: __mmask8, b: __mmask8) -> __mmask8 { a | b } @@ -4545,7 +4715,8 @@ pub fn _kor_mask8(a: __mmask8, b: __mmask8) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kxnor_mask8(a: __mmask8, b: __mmask8) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kxnor_mask8(a: __mmask8, b: __mmask8) -> 
__mmask8 { _knot_mask8(_kxor_mask8(a, b)) } @@ -4555,7 +4726,8 @@ pub fn _kxnor_mask8(a: __mmask8, b: __mmask8) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kxor_mask8(a: __mmask8, b: __mmask8) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kxor_mask8(a: __mmask8, b: __mmask8) -> __mmask8 { a ^ b } @@ -4566,7 +4738,8 @@ pub fn _kxor_mask8(a: __mmask8, b: __mmask8) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _kortest_mask8_u8(a: __mmask8, b: __mmask8, all_ones: *mut u8) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _kortest_mask8_u8(a: __mmask8, b: __mmask8, all_ones: *mut u8) -> u8 { let tmp = _kor_mask8(a, b); *all_ones = (tmp == 0xff) as u8; (tmp == 0) as u8 @@ -4579,7 +4752,8 @@ pub unsafe fn _kortest_mask8_u8(a: __mmask8, b: __mmask8, all_ones: *mut u8) -> #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kortestc_mask8_u8(a: __mmask8, b: __mmask8) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kortestc_mask8_u8(a: __mmask8, b: __mmask8) -> u8 { (_kor_mask8(a, b) == 0xff) as u8 } @@ -4590,7 +4764,8 @@ pub fn _kortestc_mask8_u8(a: __mmask8, b: __mmask8) -> u8 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kortestz_mask8_u8(a: __mmask8, b: __mmask8) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kortestz_mask8_u8(a: __mmask8, b: __mmask8) -> u8 { (_kor_mask8(a, b) == 0) as u8 } @@ -4601,7 +4776,8 @@ pub fn _kortestz_mask8_u8(a: __mmask8, b: __mmask8) -> u8 { #[target_feature(enable = "avx512dq")] #[rustc_legacy_const_generics(1)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kshiftli_mask8<const COUNT: u32>(a: __mmask8) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kshiftli_mask8<const COUNT: u32>(a: __mmask8) -> __mmask8 { a.unbounded_shl(COUNT) } @@ -4612,7 +4788,8 @@ pub fn _kshiftli_mask8<const COUNT: u32>(a: __mmask8) -> __mmask8 { #[target_feature(enable = "avx512dq")] #[rustc_legacy_const_generics(1)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kshiftri_mask8<const COUNT: u32>(a: __mmask8) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kshiftri_mask8<const COUNT: u32>(a: __mmask8) -> __mmask8 { a.unbounded_shr(COUNT) } @@ -4624,7 +4801,8 @@ pub fn _kshiftri_mask8<const COUNT: u32>(a: __mmask8) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _ktest_mask16_u8(a: __mmask16, b: __mmask16, and_not: *mut u8) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _ktest_mask16_u8(a: __mmask16, b: __mmask16, and_not: *mut u8) -> u8 { *and_not = (_kandn_mask16(a, b) == 0) as u8; (_kand_mask16(a, b) == 0) as u8 } @@ -4637,7 +4815,8 @@ pub unsafe fn _ktest_mask16_u8(a: __mmask16, b: __mmask16, and_not: *mut u8) -> #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _ktest_mask8_u8(a: __mmask8, b: __mmask8, and_not: *mut u8) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn
_ktest_mask8_u8(a: __mmask8, b: __mmask8, and_not: *mut u8) -> u8 { *and_not = (_kandn_mask8(a, b) == 0) as u8; (_kand_mask8(a, b) == 0) as u8 } @@ -4649,7 +4828,8 @@ pub unsafe fn _ktest_mask8_u8(a: __mmask8, b: __mmask8, and_not: *mut u8) -> u8 #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _ktestc_mask16_u8(a: __mmask16, b: __mmask16) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _ktestc_mask16_u8(a: __mmask16, b: __mmask16) -> u8 { (_kandn_mask16(a, b) == 0) as u8 } @@ -4660,7 +4840,8 @@ pub fn _ktestc_mask16_u8(a: __mmask16, b: __mmask16) -> u8 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _ktestc_mask8_u8(a: __mmask8, b: __mmask8) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _ktestc_mask8_u8(a: __mmask8, b: __mmask8) -> u8 { (_kandn_mask8(a, b) == 0) as u8 } @@ -4671,7 +4852,8 @@ pub fn _ktestc_mask8_u8(a: __mmask8, b: __mmask8) -> u8 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _ktestz_mask16_u8(a: __mmask16, b: __mmask16) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _ktestz_mask16_u8(a: __mmask16, b: __mmask16) -> u8 { (_kand_mask16(a, b) == 0) as u8 } @@ -4682,7 +4864,8 @@ pub fn _ktestz_mask16_u8(a: __mmask16, b: __mmask16) -> u8 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _ktestz_mask8_u8(a: __mmask8, b: __mmask8) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _ktestz_mask8_u8(a: __mmask8, b: __mmask8) -> u8 { (_kand_mask8(a, b) == 0) as u8 } @@ -4692,7 +4875,8 @@ pub fn _ktestz_mask8_u8(a: __mmask8, b: __mmask8) -> u8 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _load_mask8(mem_addr: *const __mmask8) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _load_mask8(mem_addr: *const __mmask8) -> __mmask8 { *mem_addr } @@ -4702,7 +4886,8 @@ pub unsafe fn _load_mask8(mem_addr: *const __mmask8) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _store_mask8(mem_addr: *mut __mmask8, a: __mmask8) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _store_mask8(mem_addr: *mut __mmask8, a: __mmask8) { *mem_addr = a; } @@ -4713,7 +4898,8 @@ pub unsafe fn _store_mask8(mem_addr: *mut __mmask8, a: __mmask8) { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_movepi32_mask(a: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_movepi32_mask(a: __m128i) -> __mmask8 { let zero = _mm_setzero_si128(); _mm_cmplt_epi32_mask(a, zero) } @@ -4725,7 +4911,8 @@ pub fn _mm_movepi32_mask(a: __m128i) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_movepi32_mask(a: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_movepi32_mask(a: __m256i) -> __mmask8 { let zero = 
_mm256_setzero_si256(); _mm256_cmplt_epi32_mask(a, zero) } @@ -4737,7 +4924,8 @@ pub fn _mm256_movepi32_mask(a: __m256i) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_movepi32_mask(a: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_movepi32_mask(a: __m512i) -> __mmask16 { let zero = _mm512_setzero_si512(); _mm512_cmplt_epi32_mask(a, zero) } @@ -4749,7 +4937,8 @@ pub fn _mm512_movepi32_mask(a: __m512i) -> __mmask16 { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_movepi64_mask(a: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_movepi64_mask(a: __m128i) -> __mmask8 { let zero = _mm_setzero_si128(); _mm_cmplt_epi64_mask(a, zero) } @@ -4761,7 +4950,8 @@ pub fn _mm_movepi64_mask(a: __m128i) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_movepi64_mask(a: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_movepi64_mask(a: __m256i) -> __mmask8 { let zero = _mm256_setzero_si256(); _mm256_cmplt_epi64_mask(a, zero) } @@ -4773,7 +4963,8 @@ pub fn _mm256_movepi64_mask(a: __m256i) -> __mmask8 { #[inline] #[target_feature(enable = "avx512dq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_movepi64_mask(a: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_movepi64_mask(a: __m512i) -> __mmask8 { let zero = _mm512_setzero_si512(); _mm512_cmplt_epi64_mask(a, zero) } @@ -4786,7 +4977,8 @@ pub fn _mm512_movepi64_mask(a: __m512i) -> __mmask8 { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vpmovm2d))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_movm_epi32(k: __mmask8) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_movm_epi32(k: __mmask8) -> __m128i { let ones = _mm_set1_epi32(-1); _mm_maskz_mov_epi32(k, ones) } @@ -4799,7 +4991,8 @@ pub fn _mm_movm_epi32(k: __mmask8) -> __m128i { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vpmovm2d))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_movm_epi32(k: __mmask8) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_movm_epi32(k: __mmask8) -> __m256i { let ones = _mm256_set1_epi32(-1); _mm256_maskz_mov_epi32(k, ones) } @@ -4812,7 +5005,8 @@ pub fn _mm256_movm_epi32(k: __mmask8) -> __m256i { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vpmovm2d))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_movm_epi32(k: __mmask16) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_movm_epi32(k: __mmask16) -> __m512i { let ones = _mm512_set1_epi32(-1); _mm512_maskz_mov_epi32(k, ones) } @@ -4825,7 +5019,8 @@ pub fn _mm512_movm_epi32(k: __mmask16) -> __m512i { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vpmovm2q))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm_movm_epi64(k: __mmask8) -> __m128i { +#[rustc_const_unstable(feature = 
"stdarch_const_x86", issue = "149298")] +pub const fn _mm_movm_epi64(k: __mmask8) -> __m128i { let ones = _mm_set1_epi64x(-1); _mm_maskz_mov_epi64(k, ones) } @@ -4838,7 +5033,8 @@ pub fn _mm_movm_epi64(k: __mmask8) -> __m128i { #[target_feature(enable = "avx512dq,avx512vl")] #[cfg_attr(test, assert_instr(vpmovm2q))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_movm_epi64(k: __mmask8) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_movm_epi64(k: __mmask8) -> __m256i { let ones = _mm256_set1_epi64x(-1); _mm256_maskz_mov_epi64(k, ones) } @@ -4851,7 +5047,8 @@ pub fn _mm256_movm_epi64(k: __mmask8) -> __m256i { #[target_feature(enable = "avx512dq")] #[cfg_attr(test, assert_instr(vpmovm2q))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_movm_epi64(k: __mmask8) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_movm_epi64(k: __mmask8) -> __m512i { let ones = _mm512_set1_epi64(-1); _mm512_maskz_mov_epi64(k, ones) } @@ -7203,6 +7400,7 @@ unsafe extern "C" { #[cfg(test)] mod tests { use super::*; + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; @@ -7226,7 +7424,7 @@ mod tests { const XOR_32: f32 = unsafe { transmute(0x66666666_u32) }; #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_mask_and_pd() { + const unsafe fn test_mm_mask_and_pd() { let a = _mm_set1_pd(OPRND1_64); let b = _mm_set1_pd(OPRND2_64); let src = _mm_set_pd(1., 2.); @@ -7236,7 +7434,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_maskz_and_pd() { + const unsafe fn test_mm_maskz_and_pd() { let a = _mm_set1_pd(OPRND1_64); let b = _mm_set1_pd(OPRND2_64); let r = _mm_maskz_and_pd(0b01, a, b); @@ -7245,7 +7443,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_mask_and_pd() { + const unsafe fn test_mm256_mask_and_pd() { let a = _mm256_set1_pd(OPRND1_64); let b = _mm256_set1_pd(OPRND2_64); let src = _mm256_set_pd(1., 2., 3., 4.); @@ -7255,7 +7453,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_maskz_and_pd() { + const unsafe fn test_mm256_maskz_and_pd() { let a = _mm256_set1_pd(OPRND1_64); let b = _mm256_set1_pd(OPRND2_64); let r = _mm256_maskz_and_pd(0b0101, a, b); @@ -7264,7 +7462,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_and_pd() { + const unsafe fn test_mm512_and_pd() { let a = _mm512_set1_pd(OPRND1_64); let b = _mm512_set1_pd(OPRND2_64); let r = _mm512_and_pd(a, b); @@ -7273,7 +7471,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_and_pd() { + const unsafe fn test_mm512_mask_and_pd() { let a = _mm512_set1_pd(OPRND1_64); let b = _mm512_set1_pd(OPRND2_64); let src = _mm512_set_pd(1., 2., 3., 4., 5., 6., 7., 8.); @@ -7283,7 +7481,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_and_pd() { + const unsafe fn test_mm512_maskz_and_pd() { let a = _mm512_set1_pd(OPRND1_64); let b = _mm512_set1_pd(OPRND2_64); let r = _mm512_maskz_and_pd(0b01010101, a, b); @@ -7292,7 +7490,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_mask_and_ps() { + const unsafe fn test_mm_mask_and_ps() { let a = _mm_set1_ps(OPRND1_32); let b = _mm_set1_ps(OPRND2_32); let src = _mm_set_ps(1., 2., 3., 4.); @@ -7302,7 +7500,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_maskz_and_ps() { + 
const unsafe fn test_mm_maskz_and_ps() { let a = _mm_set1_ps(OPRND1_32); let b = _mm_set1_ps(OPRND2_32); let r = _mm_maskz_and_ps(0b0101, a, b); @@ -7311,7 +7509,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_mask_and_ps() { + const unsafe fn test_mm256_mask_and_ps() { let a = _mm256_set1_ps(OPRND1_32); let b = _mm256_set1_ps(OPRND2_32); let src = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); @@ -7321,7 +7519,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_maskz_and_ps() { + const unsafe fn test_mm256_maskz_and_ps() { let a = _mm256_set1_ps(OPRND1_32); let b = _mm256_set1_ps(OPRND2_32); let r = _mm256_maskz_and_ps(0b01010101, a, b); @@ -7330,7 +7528,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_and_ps() { + const unsafe fn test_mm512_and_ps() { let a = _mm512_set1_ps(OPRND1_32); let b = _mm512_set1_ps(OPRND2_32); let r = _mm512_and_ps(a, b); @@ -7339,7 +7537,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_and_ps() { + const unsafe fn test_mm512_mask_and_ps() { let a = _mm512_set1_ps(OPRND1_32); let b = _mm512_set1_ps(OPRND2_32); let src = _mm512_set_ps( @@ -7354,7 +7552,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_and_ps() { + const unsafe fn test_mm512_maskz_and_ps() { let a = _mm512_set1_ps(OPRND1_32); let b = _mm512_set1_ps(OPRND2_32); let r = _mm512_maskz_and_ps(0b0101010101010101, a, b); @@ -7366,7 +7564,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_mask_andnot_pd() { + const unsafe fn test_mm_mask_andnot_pd() { let a = _mm_set1_pd(OPRND1_64); let b = _mm_set1_pd(OPRND2_64); let src = _mm_set_pd(1., 2.); @@ -7376,7 +7574,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_maskz_andnot_pd() { + const unsafe fn test_mm_maskz_andnot_pd() { let a = _mm_set1_pd(OPRND1_64); let b = _mm_set1_pd(OPRND2_64); let r = _mm_maskz_andnot_pd(0b01, a, b); @@ -7385,7 +7583,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_mask_andnot_pd() { + const unsafe fn test_mm256_mask_andnot_pd() { let a = _mm256_set1_pd(OPRND1_64); let b = _mm256_set1_pd(OPRND2_64); let src = _mm256_set_pd(1., 2., 3., 4.); @@ -7395,7 +7593,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_maskz_andnot_pd() { + const unsafe fn test_mm256_maskz_andnot_pd() { let a = _mm256_set1_pd(OPRND1_64); let b = _mm256_set1_pd(OPRND2_64); let r = _mm256_maskz_andnot_pd(0b0101, a, b); @@ -7404,7 +7602,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_andnot_pd() { + const unsafe fn test_mm512_andnot_pd() { let a = _mm512_set1_pd(OPRND1_64); let b = _mm512_set1_pd(OPRND2_64); let r = _mm512_andnot_pd(a, b); @@ -7413,7 +7611,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_andnot_pd() { + const unsafe fn test_mm512_mask_andnot_pd() { let a = _mm512_set1_pd(OPRND1_64); let b = _mm512_set1_pd(OPRND2_64); let src = _mm512_set_pd(1., 2., 3., 4., 5., 6., 7., 8.); @@ -7423,7 +7621,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_andnot_pd() { + const unsafe fn test_mm512_maskz_andnot_pd() { let a = _mm512_set1_pd(OPRND1_64); let b = _mm512_set1_pd(OPRND2_64); let r = _mm512_maskz_andnot_pd(0b01010101, a, b); @@ -7432,7 +7630,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_mask_andnot_ps() { + const unsafe fn 
test_mm_mask_andnot_ps() { let a = _mm_set1_ps(OPRND1_32); let b = _mm_set1_ps(OPRND2_32); let src = _mm_set_ps(1., 2., 3., 4.); @@ -7442,7 +7640,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_maskz_andnot_ps() { + const unsafe fn test_mm_maskz_andnot_ps() { let a = _mm_set1_ps(OPRND1_32); let b = _mm_set1_ps(OPRND2_32); let r = _mm_maskz_andnot_ps(0b0101, a, b); @@ -7451,7 +7649,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_mask_andnot_ps() { + const unsafe fn test_mm256_mask_andnot_ps() { let a = _mm256_set1_ps(OPRND1_32); let b = _mm256_set1_ps(OPRND2_32); let src = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); @@ -7461,7 +7659,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_maskz_andnot_ps() { + const unsafe fn test_mm256_maskz_andnot_ps() { let a = _mm256_set1_ps(OPRND1_32); let b = _mm256_set1_ps(OPRND2_32); let r = _mm256_maskz_andnot_ps(0b01010101, a, b); @@ -7470,7 +7668,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_andnot_ps() { + const unsafe fn test_mm512_andnot_ps() { let a = _mm512_set1_ps(OPRND1_32); let b = _mm512_set1_ps(OPRND2_32); let r = _mm512_andnot_ps(a, b); @@ -7479,7 +7677,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_andnot_ps() { + const unsafe fn test_mm512_mask_andnot_ps() { let a = _mm512_set1_ps(OPRND1_32); let b = _mm512_set1_ps(OPRND2_32); let src = _mm512_set_ps( @@ -7494,7 +7692,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_andnot_ps() { + const unsafe fn test_mm512_maskz_andnot_ps() { let a = _mm512_set1_ps(OPRND1_32); let b = _mm512_set1_ps(OPRND2_32); let r = _mm512_maskz_andnot_ps(0b0101010101010101, a, b); @@ -7506,7 +7704,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_mask_or_pd() { + const unsafe fn test_mm_mask_or_pd() { let a = _mm_set1_pd(OPRND1_64); let b = _mm_set1_pd(OPRND2_64); let src = _mm_set_pd(1., 2.); @@ -7516,7 +7714,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_maskz_or_pd() { + const unsafe fn test_mm_maskz_or_pd() { let a = _mm_set1_pd(OPRND1_64); let b = _mm_set1_pd(OPRND2_64); let r = _mm_maskz_or_pd(0b01, a, b); @@ -7525,7 +7723,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_mask_or_pd() { + const unsafe fn test_mm256_mask_or_pd() { let a = _mm256_set1_pd(OPRND1_64); let b = _mm256_set1_pd(OPRND2_64); let src = _mm256_set_pd(1., 2., 3., 4.); @@ -7535,7 +7733,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_maskz_or_pd() { + const unsafe fn test_mm256_maskz_or_pd() { let a = _mm256_set1_pd(OPRND1_64); let b = _mm256_set1_pd(OPRND2_64); let r = _mm256_maskz_or_pd(0b0101, a, b); @@ -7544,7 +7742,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_or_pd() { + const unsafe fn test_mm512_or_pd() { let a = _mm512_set1_pd(OPRND1_64); let b = _mm512_set1_pd(OPRND2_64); let r = _mm512_or_pd(a, b); @@ -7553,7 +7751,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_or_pd() { + const unsafe fn test_mm512_mask_or_pd() { let a = _mm512_set1_pd(OPRND1_64); let b = _mm512_set1_pd(OPRND2_64); let src = _mm512_set_pd(1., 2., 3., 4., 5., 6., 7., 8.); @@ -7563,7 +7761,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_or_pd() { + const unsafe fn test_mm512_maskz_or_pd() { let a = _mm512_set1_pd(OPRND1_64); let b = 
_mm512_set1_pd(OPRND2_64); let r = _mm512_maskz_or_pd(0b01010101, a, b); @@ -7572,7 +7770,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_mask_or_ps() { + const unsafe fn test_mm_mask_or_ps() { let a = _mm_set1_ps(OPRND1_32); let b = _mm_set1_ps(OPRND2_32); let src = _mm_set_ps(1., 2., 3., 4.); @@ -7582,7 +7780,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_maskz_or_ps() { + const unsafe fn test_mm_maskz_or_ps() { let a = _mm_set1_ps(OPRND1_32); let b = _mm_set1_ps(OPRND2_32); let r = _mm_maskz_or_ps(0b0101, a, b); @@ -7591,7 +7789,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_mask_or_ps() { + const unsafe fn test_mm256_mask_or_ps() { let a = _mm256_set1_ps(OPRND1_32); let b = _mm256_set1_ps(OPRND2_32); let src = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); @@ -7601,7 +7799,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_maskz_or_ps() { + const unsafe fn test_mm256_maskz_or_ps() { let a = _mm256_set1_ps(OPRND1_32); let b = _mm256_set1_ps(OPRND2_32); let r = _mm256_maskz_or_ps(0b01010101, a, b); @@ -7610,7 +7808,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_or_ps() { + const unsafe fn test_mm512_or_ps() { let a = _mm512_set1_ps(OPRND1_32); let b = _mm512_set1_ps(OPRND2_32); let r = _mm512_or_ps(a, b); @@ -7619,7 +7817,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_or_ps() { + const unsafe fn test_mm512_mask_or_ps() { let a = _mm512_set1_ps(OPRND1_32); let b = _mm512_set1_ps(OPRND2_32); let src = _mm512_set_ps( @@ -7634,7 +7832,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_or_ps() { + const unsafe fn test_mm512_maskz_or_ps() { let a = _mm512_set1_ps(OPRND1_32); let b = _mm512_set1_ps(OPRND2_32); let r = _mm512_maskz_or_ps(0b0101010101010101, a, b); @@ -7645,7 +7843,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_mask_xor_pd() { + const unsafe fn test_mm_mask_xor_pd() { let a = _mm_set1_pd(OPRND1_64); let b = _mm_set1_pd(OPRND2_64); let src = _mm_set_pd(1., 2.); @@ -7655,7 +7853,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_maskz_xor_pd() { + const unsafe fn test_mm_maskz_xor_pd() { let a = _mm_set1_pd(OPRND1_64); let b = _mm_set1_pd(OPRND2_64); let r = _mm_maskz_xor_pd(0b01, a, b); @@ -7664,7 +7862,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_mask_xor_pd() { + const unsafe fn test_mm256_mask_xor_pd() { let a = _mm256_set1_pd(OPRND1_64); let b = _mm256_set1_pd(OPRND2_64); let src = _mm256_set_pd(1., 2., 3., 4.); @@ -7674,7 +7872,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_maskz_xor_pd() { + const unsafe fn test_mm256_maskz_xor_pd() { let a = _mm256_set1_pd(OPRND1_64); let b = _mm256_set1_pd(OPRND2_64); let r = _mm256_maskz_xor_pd(0b0101, a, b); @@ -7683,7 +7881,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_xor_pd() { + const unsafe fn test_mm512_xor_pd() { let a = _mm512_set1_pd(OPRND1_64); let b = _mm512_set1_pd(OPRND2_64); let r = _mm512_xor_pd(a, b); @@ -7692,7 +7890,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_xor_pd() { + const unsafe fn test_mm512_mask_xor_pd() { let a = _mm512_set1_pd(OPRND1_64); let b = _mm512_set1_pd(OPRND2_64); let src = _mm512_set_pd(1., 2., 3., 4., 5., 6., 7., 8.); @@ -7702,7 +7900,7 @@ mod tests { } 
#[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_xor_pd() { + const unsafe fn test_mm512_maskz_xor_pd() { let a = _mm512_set1_pd(OPRND1_64); let b = _mm512_set1_pd(OPRND2_64); let r = _mm512_maskz_xor_pd(0b01010101, a, b); @@ -7711,7 +7909,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_mask_xor_ps() { + const unsafe fn test_mm_mask_xor_ps() { let a = _mm_set1_ps(OPRND1_32); let b = _mm_set1_ps(OPRND2_32); let src = _mm_set_ps(1., 2., 3., 4.); @@ -7721,7 +7919,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_maskz_xor_ps() { + const unsafe fn test_mm_maskz_xor_ps() { let a = _mm_set1_ps(OPRND1_32); let b = _mm_set1_ps(OPRND2_32); let r = _mm_maskz_xor_ps(0b0101, a, b); @@ -7730,7 +7928,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_mask_xor_ps() { + const unsafe fn test_mm256_mask_xor_ps() { let a = _mm256_set1_ps(OPRND1_32); let b = _mm256_set1_ps(OPRND2_32); let src = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); @@ -7740,7 +7938,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_maskz_xor_ps() { + const unsafe fn test_mm256_maskz_xor_ps() { let a = _mm256_set1_ps(OPRND1_32); let b = _mm256_set1_ps(OPRND2_32); let r = _mm256_maskz_xor_ps(0b01010101, a, b); @@ -7749,7 +7947,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_xor_ps() { + const unsafe fn test_mm512_xor_ps() { let a = _mm512_set1_ps(OPRND1_32); let b = _mm512_set1_ps(OPRND2_32); let r = _mm512_xor_ps(a, b); @@ -7758,7 +7956,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_xor_ps() { + const unsafe fn test_mm512_mask_xor_ps() { let a = _mm512_set1_ps(OPRND1_32); let b = _mm512_set1_ps(OPRND2_32); let src = _mm512_set_ps( @@ -7773,7 +7971,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_xor_ps() { + const unsafe fn test_mm512_maskz_xor_ps() { let a = _mm512_set1_ps(OPRND1_32); let b = _mm512_set1_ps(OPRND2_32); let r = _mm512_maskz_xor_ps(0b0101010101010101, a, b); @@ -7785,7 +7983,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_broadcast_f32x2() { + const unsafe fn test_mm256_broadcast_f32x2() { let a = _mm_set_ps(1., 2., 3., 4.); let r = _mm256_broadcast_f32x2(a); let e = _mm256_set_ps(3., 4., 3., 4., 3., 4., 3., 4.); @@ -7793,7 +7991,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_mask_broadcast_f32x2() { + const unsafe fn test_mm256_mask_broadcast_f32x2() { let a = _mm_set_ps(1., 2., 3., 4.); let b = _mm256_set_ps(5., 6., 7., 8., 9., 10., 11., 12.); let r = _mm256_mask_broadcast_f32x2(b, 0b01101001, a); @@ -7802,7 +8000,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_maskz_broadcast_f32x2() { + const unsafe fn test_mm256_maskz_broadcast_f32x2() { let a = _mm_set_ps(1., 2., 3., 4.); let r = _mm256_maskz_broadcast_f32x2(0b01101001, a); let e = _mm256_set_ps(0., 4., 3., 0., 3., 0., 0., 4.); @@ -7810,7 +8008,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_broadcast_f32x2() { + const unsafe fn test_mm512_broadcast_f32x2() { let a = _mm_set_ps(1., 2., 3., 4.); let r = _mm512_broadcast_f32x2(a); let e = _mm512_set_ps( @@ -7820,7 +8018,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_broadcast_f32x2() { + const unsafe fn test_mm512_mask_broadcast_f32x2() { let a = _mm_set_ps(1., 2., 3., 4.); let b = _mm512_set_ps( 5., 6., 
7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., @@ -7833,7 +8031,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_broadcast_f32x2() { + const unsafe fn test_mm512_maskz_broadcast_f32x2() { let a = _mm_set_ps(1., 2., 3., 4.); let r = _mm512_maskz_broadcast_f32x2(0b0110100100111100, a); let e = _mm512_set_ps( @@ -7843,7 +8041,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_broadcast_f32x8() { + const unsafe fn test_mm512_broadcast_f32x8() { let a = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm512_broadcast_f32x8(a); let e = _mm512_set_ps( @@ -7853,7 +8051,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_broadcast_f32x8() { + const unsafe fn test_mm512_mask_broadcast_f32x8() { let a = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm512_set_ps( 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., @@ -7866,7 +8064,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_broadcast_f32x8() { + const unsafe fn test_mm512_maskz_broadcast_f32x8() { let a = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm512_maskz_broadcast_f32x8(0b0110100100111100, a); let e = _mm512_set_ps( @@ -7876,7 +8074,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_broadcast_f64x2() { + const unsafe fn test_mm256_broadcast_f64x2() { let a = _mm_set_pd(1., 2.); let r = _mm256_broadcast_f64x2(a); let e = _mm256_set_pd(1., 2., 1., 2.); @@ -7884,7 +8082,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_mask_broadcast_f64x2() { + const unsafe fn test_mm256_mask_broadcast_f64x2() { let a = _mm_set_pd(1., 2.); let b = _mm256_set_pd(3., 4., 5., 6.); let r = _mm256_mask_broadcast_f64x2(b, 0b0110, a); @@ -7893,7 +8091,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_maskz_broadcast_f64x2() { + const unsafe fn test_mm256_maskz_broadcast_f64x2() { let a = _mm_set_pd(1., 2.); let r = _mm256_maskz_broadcast_f64x2(0b0110, a); let e = _mm256_set_pd(0., 2., 1., 0.); @@ -7901,7 +8099,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_broadcast_f64x2() { + const unsafe fn test_mm512_broadcast_f64x2() { let a = _mm_set_pd(1., 2.); let r = _mm512_broadcast_f64x2(a); let e = _mm512_set_pd(1., 2., 1., 2., 1., 2., 1., 2.); @@ -7909,7 +8107,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_broadcast_f64x2() { + const unsafe fn test_mm512_mask_broadcast_f64x2() { let a = _mm_set_pd(1., 2.); let b = _mm512_set_pd(3., 4., 5., 6., 7., 8., 9., 10.); let r = _mm512_mask_broadcast_f64x2(b, 0b01101001, a); @@ -7918,7 +8116,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_broadcast_f64x2() { + const unsafe fn test_mm512_maskz_broadcast_f64x2() { let a = _mm_set_pd(1., 2.); let r = _mm512_maskz_broadcast_f64x2(0b01101001, a); let e = _mm512_set_pd(0., 2., 1., 0., 1., 0., 0., 2.); @@ -7926,7 +8124,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_broadcast_i32x2() { + const unsafe fn test_mm_broadcast_i32x2() { let a = _mm_set_epi32(1, 2, 3, 4); let r = _mm_broadcast_i32x2(a); let e = _mm_set_epi32(3, 4, 3, 4); @@ -7934,7 +8132,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_mask_broadcast_i32x2() { + const unsafe fn test_mm_mask_broadcast_i32x2() { let a = _mm_set_epi32(1, 2, 3, 4); let b = _mm_set_epi32(5, 6, 
7, 8); let r = _mm_mask_broadcast_i32x2(b, 0b0110, a); @@ -7943,7 +8141,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_maskz_broadcast_i32x2() { + const unsafe fn test_mm_maskz_broadcast_i32x2() { let a = _mm_set_epi32(1, 2, 3, 4); let r = _mm_maskz_broadcast_i32x2(0b0110, a); let e = _mm_set_epi32(0, 4, 3, 0); @@ -7951,7 +8149,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_broadcast_i32x2() { + const unsafe fn test_mm256_broadcast_i32x2() { let a = _mm_set_epi32(1, 2, 3, 4); let r = _mm256_broadcast_i32x2(a); let e = _mm256_set_epi32(3, 4, 3, 4, 3, 4, 3, 4); @@ -7959,7 +8157,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_mask_broadcast_i32x2() { + const unsafe fn test_mm256_mask_broadcast_i32x2() { let a = _mm_set_epi32(1, 2, 3, 4); let b = _mm256_set_epi32(5, 6, 7, 8, 9, 10, 11, 12); let r = _mm256_mask_broadcast_i32x2(b, 0b01101001, a); @@ -7968,7 +8166,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_maskz_broadcast_i32x2() { + const unsafe fn test_mm256_maskz_broadcast_i32x2() { let a = _mm_set_epi32(1, 2, 3, 4); let r = _mm256_maskz_broadcast_i32x2(0b01101001, a); let e = _mm256_set_epi32(0, 4, 3, 0, 3, 0, 0, 4); @@ -7976,7 +8174,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_broadcast_i32x2() { + const unsafe fn test_mm512_broadcast_i32x2() { let a = _mm_set_epi32(1, 2, 3, 4); let r = _mm512_broadcast_i32x2(a); let e = _mm512_set_epi32(3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4); @@ -7984,7 +8182,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_broadcast_i32x2() { + const unsafe fn test_mm512_mask_broadcast_i32x2() { let a = _mm_set_epi32(1, 2, 3, 4); let b = _mm512_set_epi32(5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20); let r = _mm512_mask_broadcast_i32x2(b, 0b0110100100111100, a); @@ -7993,7 +8191,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_broadcast_i32x2() { + const unsafe fn test_mm512_maskz_broadcast_i32x2() { let a = _mm_set_epi32(1, 2, 3, 4); let r = _mm512_maskz_broadcast_i32x2(0b0110100100111100, a); let e = _mm512_set_epi32(0, 4, 3, 0, 3, 0, 0, 4, 0, 0, 3, 4, 3, 4, 0, 0); @@ -8001,7 +8199,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_broadcast_i32x8() { + const unsafe fn test_mm512_broadcast_i32x8() { let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm512_broadcast_i32x8(a); let e = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8); @@ -8009,7 +8207,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_broadcast_i32x8() { + const unsafe fn test_mm512_mask_broadcast_i32x8() { let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm512_set_epi32( 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, @@ -8020,7 +8218,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_broadcast_i32x8() { + const unsafe fn test_mm512_maskz_broadcast_i32x8() { let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm512_maskz_broadcast_i32x8(0b0110100100111100, a); let e = _mm512_set_epi32(0, 2, 3, 0, 5, 0, 0, 8, 0, 0, 3, 4, 5, 6, 0, 0); @@ -8028,7 +8226,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_broadcast_i64x2() { + const unsafe fn test_mm256_broadcast_i64x2() { let a = _mm_set_epi64x(1, 2); let r = _mm256_broadcast_i64x2(a); let e = _mm256_set_epi64x(1, 2, 1, 2); 
@@ -8036,7 +8234,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_mask_broadcast_i64x2() { + const unsafe fn test_mm256_mask_broadcast_i64x2() { let a = _mm_set_epi64x(1, 2); let b = _mm256_set_epi64x(3, 4, 5, 6); let r = _mm256_mask_broadcast_i64x2(b, 0b0110, a); @@ -8045,7 +8243,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_maskz_broadcast_i64x2() { + const unsafe fn test_mm256_maskz_broadcast_i64x2() { let a = _mm_set_epi64x(1, 2); let r = _mm256_maskz_broadcast_i64x2(0b0110, a); let e = _mm256_set_epi64x(0, 2, 1, 0); @@ -8053,7 +8251,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_broadcast_i64x2() { + const unsafe fn test_mm512_broadcast_i64x2() { let a = _mm_set_epi64x(1, 2); let r = _mm512_broadcast_i64x2(a); let e = _mm512_set_epi64(1, 2, 1, 2, 1, 2, 1, 2); @@ -8061,7 +8259,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_broadcast_i64x2() { + const unsafe fn test_mm512_mask_broadcast_i64x2() { let a = _mm_set_epi64x(1, 2); let b = _mm512_set_epi64(3, 4, 5, 6, 7, 8, 9, 10); let r = _mm512_mask_broadcast_i64x2(b, 0b01101001, a); @@ -8070,7 +8268,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_broadcast_i64x2() { + const unsafe fn test_mm512_maskz_broadcast_i64x2() { let a = _mm_set_epi64x(1, 2); let r = _mm512_maskz_broadcast_i64x2(0b01101001, a); let e = _mm512_set_epi64(0, 2, 1, 0, 1, 0, 0, 2); @@ -8078,7 +8276,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_extractf32x8_ps() { + const unsafe fn test_mm512_extractf32x8_ps() { let a = _mm512_set_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -8088,7 +8286,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_extractf32x8_ps() { + const unsafe fn test_mm512_mask_extractf32x8_ps() { let a = _mm512_set_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -8099,7 +8297,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_extractf32x8_ps() { + const unsafe fn test_mm512_maskz_extractf32x8_ps() { let a = _mm512_set_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -8109,7 +8307,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_extractf64x2_pd() { + const unsafe fn test_mm256_extractf64x2_pd() { let a = _mm256_set_pd(1., 2., 3., 4.); let r = _mm256_extractf64x2_pd::<1>(a); let e = _mm_set_pd(1., 2.); @@ -8117,7 +8315,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_mask_extractf64x2_pd() { + const unsafe fn test_mm256_mask_extractf64x2_pd() { let a = _mm256_set_pd(1., 2., 3., 4.); let b = _mm_set_pd(5., 6.); let r = _mm256_mask_extractf64x2_pd::<1>(b, 0b01, a); @@ -8126,7 +8324,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_maskz_extractf64x2_pd() { + const unsafe fn test_mm256_maskz_extractf64x2_pd() { let a = _mm256_set_pd(1., 2., 3., 4.); let r = _mm256_maskz_extractf64x2_pd::<1>(0b01, a); let e = _mm_set_pd(0., 2.); @@ -8134,7 +8332,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_extractf64x2_pd() { + const unsafe fn test_mm512_extractf64x2_pd() { let a = _mm512_set_pd(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm512_extractf64x2_pd::<2>(a); let e = _mm_set_pd(3., 4.); @@ -8142,7 +8340,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn 
test_mm512_mask_extractf64x2_pd() { + const unsafe fn test_mm512_mask_extractf64x2_pd() { let a = _mm512_set_pd(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm_set_pd(9., 10.); let r = _mm512_mask_extractf64x2_pd::<2>(b, 0b01, a); @@ -8151,7 +8349,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_extractf64x2_pd() { + const unsafe fn test_mm512_maskz_extractf64x2_pd() { let a = _mm512_set_pd(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm512_maskz_extractf64x2_pd::<2>(0b01, a); let e = _mm_set_pd(0., 4.); @@ -8159,7 +8357,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_extracti32x8_epi32() { + const unsafe fn test_mm512_extracti32x8_epi32() { let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let r = _mm512_extracti32x8_epi32::<1>(a); let e = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); @@ -8167,7 +8365,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_extracti32x8_epi32() { + const unsafe fn test_mm512_mask_extracti32x8_epi32() { let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm256_set_epi32(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm512_mask_extracti32x8_epi32::<1>(b, 0b01101001, a); @@ -8176,7 +8374,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_extracti32x8_epi32() { + const unsafe fn test_mm512_maskz_extracti32x8_epi32() { let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let r = _mm512_maskz_extracti32x8_epi32::<1>(0b01101001, a); let e = _mm256_set_epi32(0, 2, 3, 0, 5, 0, 0, 8); @@ -8184,7 +8382,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_extracti64x2_epi64() { + const unsafe fn test_mm256_extracti64x2_epi64() { let a = _mm256_set_epi64x(1, 2, 3, 4); let r = _mm256_extracti64x2_epi64::<1>(a); let e = _mm_set_epi64x(1, 2); @@ -8192,7 +8390,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_mask_extracti64x2_epi64() { + const unsafe fn test_mm256_mask_extracti64x2_epi64() { let a = _mm256_set_epi64x(1, 2, 3, 4); let b = _mm_set_epi64x(5, 6); let r = _mm256_mask_extracti64x2_epi64::<1>(b, 0b01, a); @@ -8201,7 +8399,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_maskz_extracti64x2_epi64() { + const unsafe fn test_mm256_maskz_extracti64x2_epi64() { let a = _mm256_set_epi64x(1, 2, 3, 4); let r = _mm256_maskz_extracti64x2_epi64::<1>(0b01, a); let e = _mm_set_epi64x(0, 2); @@ -8209,7 +8407,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_extracti64x2_epi64() { + const unsafe fn test_mm512_extracti64x2_epi64() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm512_extracti64x2_epi64::<2>(a); let e = _mm_set_epi64x(3, 4); @@ -8217,7 +8415,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_extracti64x2_epi64() { + const unsafe fn test_mm512_mask_extracti64x2_epi64() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm_set_epi64x(9, 10); let r = _mm512_mask_extracti64x2_epi64::<2>(b, 0b01, a); @@ -8226,7 +8424,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_extracti64x2_epi64() { + const unsafe fn test_mm512_maskz_extracti64x2_epi64() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm512_maskz_extracti64x2_epi64::<2>(0b01, a); let e = _mm_set_epi64x(0, 4); @@ -8234,7 +8432,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn 
test_mm512_insertf32x8() { + const unsafe fn test_mm512_insertf32x8() { let a = _mm512_set_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -8247,7 +8445,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_insertf32x8() { + const unsafe fn test_mm512_mask_insertf32x8() { let a = _mm512_set_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -8263,7 +8461,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_insertf32x8() { + const unsafe fn test_mm512_maskz_insertf32x8() { let a = _mm512_set_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -8276,7 +8474,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_insertf64x2() { + const unsafe fn test_mm256_insertf64x2() { let a = _mm256_set_pd(1., 2., 3., 4.); let b = _mm_set_pd(5., 6.); let r = _mm256_insertf64x2::<1>(a, b); @@ -8285,7 +8483,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_mask_insertf64x2() { + const unsafe fn test_mm256_mask_insertf64x2() { let a = _mm256_set_pd(1., 2., 3., 4.); let b = _mm_set_pd(5., 6.); let src = _mm256_set_pd(7., 8., 9., 10.); @@ -8295,7 +8493,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_maskz_insertf64x2() { + const unsafe fn test_mm256_maskz_insertf64x2() { let a = _mm256_set_pd(1., 2., 3., 4.); let b = _mm_set_pd(5., 6.); let r = _mm256_maskz_insertf64x2::<1>(0b0110, a, b); @@ -8304,7 +8502,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_insertf64x2() { + const unsafe fn test_mm512_insertf64x2() { let a = _mm512_set_pd(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm_set_pd(9., 10.); let r = _mm512_insertf64x2::<2>(a, b); @@ -8313,7 +8511,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_insertf64x2() { + const unsafe fn test_mm512_mask_insertf64x2() { let a = _mm512_set_pd(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm_set_pd(9., 10.); let src = _mm512_set_pd(11., 12., 13., 14., 15., 16., 17., 18.); @@ -8323,7 +8521,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_insertf64x2() { + const unsafe fn test_mm512_maskz_insertf64x2() { let a = _mm512_set_pd(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm_set_pd(9., 10.); let r = _mm512_maskz_insertf64x2::<2>(0b01101001, a, b); @@ -8332,7 +8530,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_inserti32x8() { + const unsafe fn test_mm512_inserti32x8() { let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm256_set_epi32(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm512_inserti32x8::<1>(a, b); @@ -8343,7 +8541,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_inserti32x8() { + const unsafe fn test_mm512_mask_inserti32x8() { let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm256_set_epi32(17, 18, 19, 20, 21, 22, 23, 24); let src = _mm512_set_epi32( @@ -8357,7 +8555,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_inserti32x8() { + const unsafe fn test_mm512_maskz_inserti32x8() { let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm256_set_epi32(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm512_maskz_inserti32x8::<1>(0b0110100100111100, a, b); @@ -8366,7 +8564,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - 
unsafe fn test_mm256_inserti64x2() { + const unsafe fn test_mm256_inserti64x2() { let a = _mm256_set_epi64x(1, 2, 3, 4); let b = _mm_set_epi64x(5, 6); let r = _mm256_inserti64x2::<1>(a, b); @@ -8375,7 +8573,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_mask_inserti64x2() { + const unsafe fn test_mm256_mask_inserti64x2() { let a = _mm256_set_epi64x(1, 2, 3, 4); let b = _mm_set_epi64x(5, 6); let src = _mm256_set_epi64x(7, 8, 9, 10); @@ -8385,7 +8583,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_maskz_inserti64x2() { + const unsafe fn test_mm256_maskz_inserti64x2() { let a = _mm256_set_epi64x(1, 2, 3, 4); let b = _mm_set_epi64x(5, 6); let r = _mm256_maskz_inserti64x2::<1>(0b0110, a, b); @@ -8394,7 +8592,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_inserti64x2() { + const unsafe fn test_mm512_inserti64x2() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm_set_epi64x(9, 10); let r = _mm512_inserti64x2::<2>(a, b); @@ -8403,7 +8601,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_inserti64x2() { + const unsafe fn test_mm512_mask_inserti64x2() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm_set_epi64x(9, 10); let src = _mm512_set_epi64(11, 12, 13, 14, 15, 16, 17, 18); @@ -8413,7 +8611,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_inserti64x2() { + const unsafe fn test_mm512_maskz_inserti64x2() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm_set_epi64x(9, 10); let r = _mm512_maskz_inserti64x2::<2>(0b01101001, a, b); @@ -9654,7 +9852,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_mullo_epi64() { + const unsafe fn test_mm_mullo_epi64() { let a = _mm_set_epi64x(1, 2); let b = _mm_set_epi64x(3, 4); let r = _mm_mullo_epi64(a, b); @@ -9663,7 +9861,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_mask_mullo_epi64() { + const unsafe fn test_mm_mask_mullo_epi64() { let a = _mm_set_epi64x(1, 2); let b = _mm_set_epi64x(3, 4); let c = _mm_set_epi64x(5, 6); @@ -9673,7 +9871,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_maskz_mullo_epi64() { + const unsafe fn test_mm_maskz_mullo_epi64() { let a = _mm_set_epi64x(1, 2); let b = _mm_set_epi64x(3, 4); let r = _mm_maskz_mullo_epi64(0b01, a, b); @@ -9682,7 +9880,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_mullo_epi64() { + const unsafe fn test_mm256_mullo_epi64() { let a = _mm256_set_epi64x(1, 2, 3, 4); let b = _mm256_set_epi64x(5, 6, 7, 8); let r = _mm256_mullo_epi64(a, b); @@ -9691,7 +9889,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_mask_mullo_epi64() { + const unsafe fn test_mm256_mask_mullo_epi64() { let a = _mm256_set_epi64x(1, 2, 3, 4); let b = _mm256_set_epi64x(5, 6, 7, 8); let c = _mm256_set_epi64x(9, 10, 11, 12); @@ -9701,7 +9899,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_maskz_mullo_epi64() { + const unsafe fn test_mm256_maskz_mullo_epi64() { let a = _mm256_set_epi64x(1, 2, 3, 4); let b = _mm256_set_epi64x(5, 6, 7, 8); let r = _mm256_maskz_mullo_epi64(0b0110, a, b); @@ -9710,7 +9908,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mullo_epi64() { + const unsafe fn test_mm512_mullo_epi64() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm512_set_epi64(9, 10, 11, 12, 13, 14, 
15, 16); let r = _mm512_mullo_epi64(a, b); @@ -9719,7 +9917,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_mask_mullo_epi64() { + const unsafe fn test_mm512_mask_mullo_epi64() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm512_set_epi64(9, 10, 11, 12, 13, 14, 15, 16); let c = _mm512_set_epi64(17, 18, 19, 20, 21, 22, 23, 24); @@ -9729,7 +9927,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_maskz_mullo_epi64() { + const unsafe fn test_mm512_maskz_mullo_epi64() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm512_set_epi64(9, 10, 11, 12, 13, 14, 15, 16); let r = _mm512_maskz_mullo_epi64(0b01101001, a, b); @@ -9738,7 +9936,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_cvtmask8_u32() { + const unsafe fn test_cvtmask8_u32() { let a: __mmask8 = 0b01101001; let r = _cvtmask8_u32(a); let e: u32 = 0b01101001; @@ -9746,7 +9944,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_cvtu32_mask8() { + const unsafe fn test_cvtu32_mask8() { let a: u32 = 0b01101001; let r = _cvtu32_mask8(a); let e: __mmask8 = 0b01101001; @@ -9754,7 +9952,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_kadd_mask16() { + const unsafe fn test_kadd_mask16() { let a: __mmask16 = 27549; let b: __mmask16 = 23434; let r = _kadd_mask16(a, b); @@ -9763,7 +9961,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_kadd_mask8() { + const unsafe fn test_kadd_mask8() { let a: __mmask8 = 98; let b: __mmask8 = 117; let r = _kadd_mask8(a, b); @@ -9772,7 +9970,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_kand_mask8() { + const unsafe fn test_kand_mask8() { let a: __mmask8 = 0b01101001; let b: __mmask8 = 0b10110011; let r = _kand_mask8(a, b); @@ -9781,7 +9979,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_kandn_mask8() { + const unsafe fn test_kandn_mask8() { let a: __mmask8 = 0b01101001; let b: __mmask8 = 0b10110011; let r = _kandn_mask8(a, b); @@ -9790,7 +9988,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_knot_mask8() { + const unsafe fn test_knot_mask8() { let a: __mmask8 = 0b01101001; let r = _knot_mask8(a); let e: __mmask8 = 0b10010110; @@ -9798,7 +9996,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_kor_mask8() { + const unsafe fn test_kor_mask8() { let a: __mmask8 = 0b01101001; let b: __mmask8 = 0b10110011; let r = _kor_mask8(a, b); @@ -9807,7 +10005,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_kxnor_mask8() { + const unsafe fn test_kxnor_mask8() { let a: __mmask8 = 0b01101001; let b: __mmask8 = 0b10110011; let r = _kxnor_mask8(a, b); @@ -9816,7 +10014,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_kxor_mask8() { + const unsafe fn test_kxor_mask8() { let a: __mmask8 = 0b01101001; let b: __mmask8 = 0b10110011; let r = _kxor_mask8(a, b); @@ -9825,7 +10023,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_kortest_mask8_u8() { + const unsafe fn test_kortest_mask8_u8() { let a: __mmask8 = 0b01101001; let b: __mmask8 = 0b10110110; let mut all_ones: u8 = 0; @@ -9835,7 +10033,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_kortestc_mask8_u8() { + const unsafe fn test_kortestc_mask8_u8() { let a: __mmask8 = 0b01101001; let b: __mmask8 = 0b10110110; let r = _kortestc_mask8_u8(a, b); @@ -9843,7 +10041,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_kortestz_mask8_u8() { + const 
unsafe fn test_kortestz_mask8_u8() { let a: __mmask8 = 0b01101001; let b: __mmask8 = 0b10110110; let r = _kortestz_mask8_u8(a, b); @@ -9851,7 +10049,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_kshiftli_mask8() { + const unsafe fn test_kshiftli_mask8() { let a: __mmask8 = 0b01101001; let r = _kshiftli_mask8::<3>(a); let e: __mmask8 = 0b01001000; @@ -9871,7 +10069,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_kshiftri_mask8() { + const unsafe fn test_kshiftri_mask8() { let a: __mmask8 = 0b10101001; let r = _kshiftri_mask8::<3>(a); let e: __mmask8 = 0b00010101; @@ -9891,7 +10089,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_ktest_mask8_u8() { + const unsafe fn test_ktest_mask8_u8() { let a: __mmask8 = 0b01101001; let b: __mmask8 = 0b10010110; let mut and_not: u8 = 0; @@ -9901,7 +10099,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_ktestc_mask8_u8() { + const unsafe fn test_ktestc_mask8_u8() { let a: __mmask8 = 0b01101001; let b: __mmask8 = 0b10010110; let r = _ktestc_mask8_u8(a, b); @@ -9909,7 +10107,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_ktestz_mask8_u8() { + const unsafe fn test_ktestz_mask8_u8() { let a: __mmask8 = 0b01101001; let b: __mmask8 = 0b10010110; let r = _ktestz_mask8_u8(a, b); @@ -9917,7 +10115,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_ktest_mask16_u8() { + const unsafe fn test_ktest_mask16_u8() { let a: __mmask16 = 0b0110100100111100; let b: __mmask16 = 0b1001011011000011; let mut and_not: u8 = 0; @@ -9927,7 +10125,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_ktestc_mask16_u8() { + const unsafe fn test_ktestc_mask16_u8() { let a: __mmask16 = 0b0110100100111100; let b: __mmask16 = 0b1001011011000011; let r = _ktestc_mask16_u8(a, b); @@ -9935,7 +10133,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_ktestz_mask16_u8() { + const unsafe fn test_ktestz_mask16_u8() { let a: __mmask16 = 0b0110100100111100; let b: __mmask16 = 0b1001011011000011; let r = _ktestz_mask16_u8(a, b); @@ -9943,7 +10141,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_load_mask8() { + const unsafe fn test_load_mask8() { let a: __mmask8 = 0b01101001; let r = _load_mask8(&a); let e: __mmask8 = 0b01101001; @@ -9951,7 +10149,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_store_mask8() { + const unsafe fn test_store_mask8() { let a: __mmask8 = 0b01101001; let mut r = 0; _store_mask8(&mut r, a); @@ -9960,7 +10158,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_movepi32_mask() { + const unsafe fn test_mm_movepi32_mask() { let a = _mm_set_epi32(0, -2, -3, 4); let r = _mm_movepi32_mask(a); let e = 0b0110; @@ -9968,7 +10166,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_movepi32_mask() { + const unsafe fn test_mm256_movepi32_mask() { let a = _mm256_set_epi32(0, -2, -3, 4, -5, 6, 7, -8); let r = _mm256_movepi32_mask(a); let e = 0b01101001; @@ -9976,7 +10174,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_movepi32_mask() { + const unsafe fn test_mm512_movepi32_mask() { let a = _mm512_set_epi32( 0, -2, -3, 4, -5, 6, 7, -8, 9, 10, -11, -12, -13, -14, 15, 16, ); @@ -9986,7 +10184,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_movepi64_mask() { + const unsafe fn test_mm_movepi64_mask() { let a = _mm_set_epi64x(0, -2); let r = _mm_movepi64_mask(a); let 
e = 0b01; @@ -9994,7 +10192,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_movepi64_mask() { + const unsafe fn test_mm256_movepi64_mask() { let a = _mm256_set_epi64x(0, -2, -3, 4); let r = _mm256_movepi64_mask(a); let e = 0b0110; @@ -10002,7 +10200,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_movepi64_mask() { + const unsafe fn test_mm512_movepi64_mask() { let a = _mm512_set_epi64(0, -2, -3, 4, -5, 6, 7, -8); let r = _mm512_movepi64_mask(a); let e = 0b01101001; @@ -10010,7 +10208,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_movm_epi32() { + const unsafe fn test_mm_movm_epi32() { let a = 0b0110; let r = _mm_movm_epi32(a); let e = _mm_set_epi32(0, -1, -1, 0); @@ -10018,7 +10216,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_movm_epi32() { + const unsafe fn test_mm256_movm_epi32() { let a = 0b01101001; let r = _mm256_movm_epi32(a); let e = _mm256_set_epi32(0, -1, -1, 0, -1, 0, 0, -1); @@ -10026,7 +10224,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_movm_epi32() { + const unsafe fn test_mm512_movm_epi32() { let a = 0b0110100100111100; let r = _mm512_movm_epi32(a); let e = _mm512_set_epi32(0, -1, -1, 0, -1, 0, 0, -1, 0, 0, -1, -1, -1, -1, 0, 0); @@ -10034,7 +10232,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm_movm_epi64() { + const unsafe fn test_mm_movm_epi64() { let a = 0b01; let r = _mm_movm_epi64(a); let e = _mm_set_epi64x(0, -1); @@ -10042,7 +10240,7 @@ mod tests { } #[simd_test(enable = "avx512dq,avx512vl")] - unsafe fn test_mm256_movm_epi64() { + const unsafe fn test_mm256_movm_epi64() { let a = 0b0110; let r = _mm256_movm_epi64(a); let e = _mm256_set_epi64x(0, -1, -1, 0); @@ -10050,7 +10248,7 @@ mod tests { } #[simd_test(enable = "avx512dq")] - unsafe fn test_mm512_movm_epi64() { + const unsafe fn test_mm512_movm_epi64() { let a = 0b01101001; let r = _mm512_movm_epi64(a); let e = _mm512_set_epi64(0, -1, -1, 0, -1, 0, 0, -1); diff --git a/crates/core_arch/src/x86/avx512f.rs b/crates/core_arch/src/x86/avx512f.rs index 8d3ddbd964..f300bf48ef 100644 --- a/crates/core_arch/src/x86/avx512f.rs +++ b/crates/core_arch/src/x86/avx512f.rs @@ -17,7 +17,8 @@ use stdarch_test::assert_instr; #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsd))] -pub fn _mm512_abs_epi32(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_abs_epi32(a: __m512i) -> __m512i { unsafe { let a = a.as_i32x16(); let r = simd_select::(simd_lt(a, i32x16::ZERO), simd_neg(a), a); @@ -34,7 +35,8 @@ pub fn _mm512_abs_epi32(a: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsd))] -pub fn _mm512_mask_abs_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_abs_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { unsafe { let abs = _mm512_abs_epi32(a).as_i32x16(); transmute(simd_select_bitmask(k, abs, src.as_i32x16())) @@ -50,7 +52,8 @@ pub fn _mm512_mask_abs_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsd))] -pub fn 
_mm512_maskz_abs_epi32(k: __mmask16, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_abs_epi32(k: __mmask16, a: __m512i) -> __m512i { unsafe { let abs = _mm512_abs_epi32(a).as_i32x16(); transmute(simd_select_bitmask(k, abs, i32x16::ZERO)) @@ -64,7 +67,8 @@ pub fn _mm512_maskz_abs_epi32(k: __mmask16, a: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsd))] -pub fn _mm256_mask_abs_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_abs_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { unsafe { let abs = _mm256_abs_epi32(a).as_i32x8(); transmute(simd_select_bitmask(k, abs, src.as_i32x8())) @@ -78,7 +82,8 @@ pub fn _mm256_mask_abs_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsd))] -pub fn _mm256_maskz_abs_epi32(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_abs_epi32(k: __mmask8, a: __m256i) -> __m256i { unsafe { let abs = _mm256_abs_epi32(a).as_i32x8(); transmute(simd_select_bitmask(k, abs, i32x8::ZERO)) @@ -92,7 +97,8 @@ pub fn _mm256_maskz_abs_epi32(k: __mmask8, a: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsd))] -pub fn _mm_mask_abs_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_abs_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let abs = _mm_abs_epi32(a).as_i32x4(); transmute(simd_select_bitmask(k, abs, src.as_i32x4())) @@ -106,7 +112,8 @@ pub fn _mm_mask_abs_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsd))] -pub fn _mm_maskz_abs_epi32(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_abs_epi32(k: __mmask8, a: __m128i) -> __m128i { unsafe { let abs = _mm_abs_epi32(a).as_i32x4(); transmute(simd_select_bitmask(k, abs, i32x4::ZERO)) @@ -120,7 +127,8 @@ pub fn _mm_maskz_abs_epi32(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsq))] -pub fn _mm512_abs_epi64(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_abs_epi64(a: __m512i) -> __m512i { unsafe { let a = a.as_i64x8(); let r = simd_select::(simd_lt(a, i64x8::ZERO), simd_neg(a), a); @@ -135,7 +143,8 @@ pub fn _mm512_abs_epi64(a: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsq))] -pub fn _mm512_mask_abs_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_abs_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { unsafe { let abs = 
_mm512_abs_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, abs, src.as_i64x8())) @@ -149,7 +158,8 @@ pub fn _mm512_mask_abs_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsq))] -pub fn _mm512_maskz_abs_epi64(k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_abs_epi64(k: __mmask8, a: __m512i) -> __m512i { unsafe { let abs = _mm512_abs_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, abs, i64x8::ZERO)) @@ -163,7 +173,8 @@ pub fn _mm512_maskz_abs_epi64(k: __mmask8, a: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsq))] -pub fn _mm256_abs_epi64(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_abs_epi64(a: __m256i) -> __m256i { unsafe { let a = a.as_i64x4(); let r = simd_select::(simd_lt(a, i64x4::ZERO), simd_neg(a), a); @@ -178,7 +189,8 @@ pub fn _mm256_abs_epi64(a: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsq))] -pub fn _mm256_mask_abs_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_abs_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { unsafe { let abs = _mm256_abs_epi64(a).as_i64x4(); transmute(simd_select_bitmask(k, abs, src.as_i64x4())) @@ -192,7 +204,8 @@ pub fn _mm256_mask_abs_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsq))] -pub fn _mm256_maskz_abs_epi64(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_abs_epi64(k: __mmask8, a: __m256i) -> __m256i { unsafe { let abs = _mm256_abs_epi64(a).as_i64x4(); transmute(simd_select_bitmask(k, abs, i64x4::ZERO)) @@ -206,7 +219,8 @@ pub fn _mm256_maskz_abs_epi64(k: __mmask8, a: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsq))] -pub fn _mm_abs_epi64(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_abs_epi64(a: __m128i) -> __m128i { unsafe { let a = a.as_i64x2(); let r = simd_select::(simd_lt(a, i64x2::ZERO), simd_neg(a), a); @@ -221,7 +235,8 @@ pub fn _mm_abs_epi64(a: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpabsq))] -pub fn _mm_mask_abs_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_abs_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let abs = _mm_abs_epi64(a).as_i64x2(); transmute(simd_select_bitmask(k, abs, src.as_i64x2())) @@ -235,7 +250,8 @@ pub fn _mm_mask_abs_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] 
#[cfg_attr(test, assert_instr(vpabsq))] -pub fn _mm_maskz_abs_epi64(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_abs_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { let abs = _mm_abs_epi64(a).as_i64x2(); transmute(simd_select_bitmask(k, abs, i64x2::ZERO)) @@ -249,7 +265,8 @@ pub fn _mm_maskz_abs_epi64(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandd))] -pub fn _mm512_abs_ps(v2: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_abs_ps(v2: __m512) -> __m512 { unsafe { simd_fabs(v2) } } @@ -260,7 +277,8 @@ pub fn _mm512_abs_ps(v2: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandd))] -pub fn _mm512_mask_abs_ps(src: __m512, k: __mmask16, v2: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_abs_ps(src: __m512, k: __mmask16, v2: __m512) -> __m512 { unsafe { simd_select_bitmask(k, simd_fabs(v2), src) } } @@ -271,7 +289,8 @@ pub fn _mm512_mask_abs_ps(src: __m512, k: __mmask16, v2: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandq))] -pub fn _mm512_abs_pd(v2: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_abs_pd(v2: __m512d) -> __m512d { unsafe { simd_fabs(v2) } } @@ -282,7 +301,8 @@ pub fn _mm512_abs_pd(v2: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandq))] -pub fn _mm512_mask_abs_pd(src: __m512d, k: __mmask8, v2: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_abs_pd(src: __m512d, k: __mmask8, v2: __m512d) -> __m512d { unsafe { simd_select_bitmask(k, simd_fabs(v2), src) } } @@ -293,7 +313,8 @@ pub fn _mm512_mask_abs_pd(src: __m512d, k: __mmask8, v2: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqa32))] -pub fn _mm512_mask_mov_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_mov_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { unsafe { let mov = a.as_i32x16(); transmute(simd_select_bitmask(k, mov, src.as_i32x16())) @@ -307,7 +328,8 @@ pub fn _mm512_mask_mov_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqa32))] -pub fn _mm512_maskz_mov_epi32(k: __mmask16, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_mov_epi32(k: __mmask16, a: __m512i) -> __m512i { unsafe { let mov = a.as_i32x16(); transmute(simd_select_bitmask(k, mov, i32x16::ZERO)) @@ -321,7 +343,8 @@ pub fn _mm512_maskz_mov_epi32(k: __mmask16, a: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, 
assert_instr(vmovdqa32))] -pub fn _mm256_mask_mov_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_mov_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { unsafe { let mov = a.as_i32x8(); transmute(simd_select_bitmask(k, mov, src.as_i32x8())) @@ -335,7 +358,8 @@ pub fn _mm256_mask_mov_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqa32))] -pub fn _mm256_maskz_mov_epi32(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_mov_epi32(k: __mmask8, a: __m256i) -> __m256i { unsafe { let mov = a.as_i32x8(); transmute(simd_select_bitmask(k, mov, i32x8::ZERO)) @@ -349,7 +373,8 @@ pub fn _mm256_maskz_mov_epi32(k: __mmask8, a: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqa32))] -pub fn _mm_mask_mov_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mov_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let mov = a.as_i32x4(); transmute(simd_select_bitmask(k, mov, src.as_i32x4())) @@ -363,7 +388,8 @@ pub fn _mm_mask_mov_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqa32))] -pub fn _mm_maskz_mov_epi32(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_mov_epi32(k: __mmask8, a: __m128i) -> __m128i { unsafe { let mov = a.as_i32x4(); transmute(simd_select_bitmask(k, mov, i32x4::ZERO)) @@ -377,7 +403,8 @@ pub fn _mm_maskz_mov_epi32(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqa64))] -pub fn _mm512_mask_mov_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_mov_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { unsafe { let mov = a.as_i64x8(); transmute(simd_select_bitmask(k, mov, src.as_i64x8())) @@ -391,7 +418,8 @@ pub fn _mm512_mask_mov_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqa64))] -pub fn _mm512_maskz_mov_epi64(k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_mov_epi64(k: __mmask8, a: __m512i) -> __m512i { unsafe { let mov = a.as_i64x8(); transmute(simd_select_bitmask(k, mov, i64x8::ZERO)) @@ -405,7 +433,8 @@ pub fn _mm512_maskz_mov_epi64(k: __mmask8, a: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqa64))] -pub fn _mm256_mask_mov_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_mov_epi64(src: 
__m256i, k: __mmask8, a: __m256i) -> __m256i { unsafe { let mov = a.as_i64x4(); transmute(simd_select_bitmask(k, mov, src.as_i64x4())) @@ -419,7 +448,8 @@ pub fn _mm256_mask_mov_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqa64))] -pub fn _mm256_maskz_mov_epi64(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_mov_epi64(k: __mmask8, a: __m256i) -> __m256i { unsafe { let mov = a.as_i64x4(); transmute(simd_select_bitmask(k, mov, i64x4::ZERO)) @@ -433,7 +463,8 @@ pub fn _mm256_maskz_mov_epi64(k: __mmask8, a: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqa64))] -pub fn _mm_mask_mov_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mov_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let mov = a.as_i64x2(); transmute(simd_select_bitmask(k, mov, src.as_i64x2())) @@ -447,7 +478,8 @@ pub fn _mm_mask_mov_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqa64))] -pub fn _mm_maskz_mov_epi64(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_mov_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { let mov = a.as_i64x2(); transmute(simd_select_bitmask(k, mov, i64x2::ZERO)) @@ -461,7 +493,8 @@ pub fn _mm_maskz_mov_epi64(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovaps))] -pub fn _mm512_mask_mov_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_mov_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { unsafe { let mov = a.as_f32x16(); transmute(simd_select_bitmask(k, mov, src.as_f32x16())) @@ -475,7 +508,8 @@ pub fn _mm512_mask_mov_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovaps))] -pub fn _mm512_maskz_mov_ps(k: __mmask16, a: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_mov_ps(k: __mmask16, a: __m512) -> __m512 { unsafe { let mov = a.as_f32x16(); transmute(simd_select_bitmask(k, mov, f32x16::ZERO)) @@ -489,7 +523,8 @@ pub fn _mm512_maskz_mov_ps(k: __mmask16, a: __m512) -> __m512 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovaps))] -pub fn _mm256_mask_mov_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_mov_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { unsafe { let mov = a.as_f32x8(); transmute(simd_select_bitmask(k, mov, src.as_f32x8())) @@ -503,7 +538,8 @@ pub fn _mm256_mask_mov_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { #[target_feature(enable = 
"avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovaps))] -pub fn _mm256_maskz_mov_ps(k: __mmask8, a: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_mov_ps(k: __mmask8, a: __m256) -> __m256 { unsafe { let mov = a.as_f32x8(); transmute(simd_select_bitmask(k, mov, f32x8::ZERO)) @@ -517,7 +553,8 @@ pub fn _mm256_maskz_mov_ps(k: __mmask8, a: __m256) -> __m256 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovaps))] -pub fn _mm_mask_mov_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mov_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { unsafe { let mov = a.as_f32x4(); transmute(simd_select_bitmask(k, mov, src.as_f32x4())) @@ -531,7 +568,8 @@ pub fn _mm_mask_mov_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovaps))] -pub fn _mm_maskz_mov_ps(k: __mmask8, a: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_mov_ps(k: __mmask8, a: __m128) -> __m128 { unsafe { let mov = a.as_f32x4(); transmute(simd_select_bitmask(k, mov, f32x4::ZERO)) @@ -545,7 +583,8 @@ pub fn _mm_maskz_mov_ps(k: __mmask8, a: __m128) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovapd))] -pub fn _mm512_mask_mov_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_mov_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d { unsafe { let mov = a.as_f64x8(); transmute(simd_select_bitmask(k, mov, src.as_f64x8())) @@ -559,7 +598,8 @@ pub fn _mm512_mask_mov_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovapd))] -pub fn _mm512_maskz_mov_pd(k: __mmask8, a: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_mov_pd(k: __mmask8, a: __m512d) -> __m512d { unsafe { let mov = a.as_f64x8(); transmute(simd_select_bitmask(k, mov, f64x8::ZERO)) @@ -573,7 +613,8 @@ pub fn _mm512_maskz_mov_pd(k: __mmask8, a: __m512d) -> __m512d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovapd))] -pub fn _mm256_mask_mov_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_mov_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d { unsafe { let mov = a.as_f64x4(); transmute(simd_select_bitmask(k, mov, src.as_f64x4())) @@ -587,7 +628,8 @@ pub fn _mm256_mask_mov_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovapd))] -pub fn _mm256_maskz_mov_pd(k: __mmask8, a: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_mov_pd(k: __mmask8, a: 
__m256d) -> __m256d { unsafe { let mov = a.as_f64x4(); transmute(simd_select_bitmask(k, mov, f64x4::ZERO)) @@ -601,7 +643,8 @@ pub fn _mm256_maskz_mov_pd(k: __mmask8, a: __m256d) -> __m256d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovapd))] -pub fn _mm_mask_mov_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mov_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d { unsafe { let mov = a.as_f64x2(); transmute(simd_select_bitmask(k, mov, src.as_f64x2())) @@ -615,7 +658,8 @@ pub fn _mm_mask_mov_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovapd))] -pub fn _mm_maskz_mov_pd(k: __mmask8, a: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_mov_pd(k: __mmask8, a: __m128d) -> __m128d { unsafe { let mov = a.as_f64x2(); transmute(simd_select_bitmask(k, mov, f64x2::ZERO)) @@ -629,7 +673,8 @@ pub fn _mm_maskz_mov_pd(k: __mmask8, a: __m128d) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddd))] -pub fn _mm512_add_epi32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_add_epi32(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_add(a.as_i32x16(), b.as_i32x16())) } } @@ -640,7 +685,8 @@ pub fn _mm512_add_epi32(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddd))] -pub fn _mm512_mask_add_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_add_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let add = _mm512_add_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, add, src.as_i32x16())) @@ -654,7 +700,8 @@ pub fn _mm512_mask_add_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddd))] -pub fn _mm512_maskz_add_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_add_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let add = _mm512_add_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, add, i32x16::ZERO)) @@ -668,7 +715,8 @@ pub fn _mm512_maskz_add_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddd))] -pub fn _mm256_mask_add_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_add_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let add = _mm256_add_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, add, src.as_i32x8())) @@ -682,7 +730,8 @@ pub fn _mm256_mask_add_epi32(src: __m256i, k: 
__mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddd))] -pub fn _mm256_maskz_add_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_add_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let add = _mm256_add_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, add, i32x8::ZERO)) @@ -696,7 +745,8 @@ pub fn _mm256_maskz_add_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddd))] -pub fn _mm_mask_add_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_add_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let add = _mm_add_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, add, src.as_i32x4())) @@ -710,7 +760,8 @@ pub fn _mm_mask_add_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddd))] -pub fn _mm_maskz_add_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_add_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let add = _mm_add_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, add, i32x4::ZERO)) @@ -724,7 +775,8 @@ pub fn _mm_maskz_add_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddq))] -pub fn _mm512_add_epi64(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_add_epi64(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_add(a.as_i64x8(), b.as_i64x8())) } } @@ -735,7 +787,8 @@ pub fn _mm512_add_epi64(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddq))] -pub fn _mm512_mask_add_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_add_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let add = _mm512_add_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, add, src.as_i64x8())) @@ -749,7 +802,8 @@ pub fn _mm512_mask_add_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddq))] -pub fn _mm512_maskz_add_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_add_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let add = _mm512_add_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, add, i64x8::ZERO)) @@ -763,7 +817,8 @@ pub fn _mm512_maskz_add_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] 
#[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddq))] -pub fn _mm256_mask_add_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_add_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let add = _mm256_add_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, add, src.as_i64x4())) @@ -777,7 +832,8 @@ pub fn _mm256_mask_add_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddq))] -pub fn _mm256_maskz_add_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_add_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let add = _mm256_add_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, add, i64x4::ZERO)) @@ -791,7 +847,8 @@ pub fn _mm256_maskz_add_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddq))] -pub fn _mm_mask_add_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_add_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let add = _mm_add_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, add, src.as_i64x2())) @@ -805,7 +862,8 @@ pub fn _mm_mask_add_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpaddq))] -pub fn _mm_maskz_add_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_add_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let add = _mm_add_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, add, i64x2::ZERO)) @@ -819,7 +877,8 @@ pub fn _mm_maskz_add_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddps))] -pub fn _mm512_add_ps(a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_add_ps(a: __m512, b: __m512) -> __m512 { unsafe { transmute(simd_add(a.as_f32x16(), b.as_f32x16())) } } @@ -830,7 +889,8 @@ pub fn _mm512_add_ps(a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddps))] -pub fn _mm512_mask_add_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_add_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let add = _mm512_add_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, add, src.as_f32x16())) @@ -844,7 +904,8 @@ pub fn _mm512_mask_add_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, 
assert_instr(vaddps))] -pub fn _mm512_maskz_add_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_add_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let add = _mm512_add_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, add, f32x16::ZERO)) @@ -858,7 +919,8 @@ pub fn _mm512_maskz_add_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddps))] -pub fn _mm256_mask_add_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_add_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let add = _mm256_add_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, add, src.as_f32x8())) @@ -872,7 +934,8 @@ pub fn _mm256_mask_add_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddps))] -pub fn _mm256_maskz_add_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_add_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let add = _mm256_add_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, add, f32x8::ZERO)) @@ -886,7 +949,8 @@ pub fn _mm256_maskz_add_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddps))] -pub fn _mm_mask_add_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_add_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let add = _mm_add_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, add, src.as_f32x4())) @@ -900,7 +964,8 @@ pub fn _mm_mask_add_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddps))] -pub fn _mm_maskz_add_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_add_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let add = _mm_add_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, add, f32x4::ZERO)) @@ -914,7 +979,8 @@ pub fn _mm_maskz_add_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddpd))] -pub fn _mm512_add_pd(a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_add_pd(a: __m512d, b: __m512d) -> __m512d { unsafe { transmute(simd_add(a.as_f64x8(), b.as_f64x8())) } } @@ -925,7 +991,8 @@ pub fn _mm512_add_pd(a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddpd))] -pub fn _mm512_mask_add_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = 
"149298")] +pub const fn _mm512_mask_add_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let add = _mm512_add_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, add, src.as_f64x8())) @@ -939,7 +1006,8 @@ pub fn _mm512_mask_add_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddpd))] -pub fn _mm512_maskz_add_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_add_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let add = _mm512_add_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, add, f64x8::ZERO)) @@ -953,7 +1021,8 @@ pub fn _mm512_maskz_add_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddpd))] -pub fn _mm256_mask_add_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_add_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let add = _mm256_add_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, add, src.as_f64x4())) @@ -967,7 +1036,8 @@ pub fn _mm256_mask_add_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddpd))] -pub fn _mm256_maskz_add_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_add_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let add = _mm256_add_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, add, f64x4::ZERO)) @@ -981,7 +1051,8 @@ pub fn _mm256_maskz_add_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddpd))] -pub fn _mm_mask_add_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_add_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let add = _mm_add_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, add, src.as_f64x2())) @@ -995,7 +1066,8 @@ pub fn _mm_mask_add_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddpd))] -pub fn _mm_maskz_add_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_add_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let add = _mm_add_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, add, f64x2::ZERO)) @@ -1009,7 +1081,8 @@ pub fn _mm_maskz_add_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubd))] -pub fn _mm512_sub_epi32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn 
_mm512_sub_epi32(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_sub(a.as_i32x16(), b.as_i32x16())) } } @@ -1020,7 +1093,8 @@ pub fn _mm512_sub_epi32(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubd))] -pub fn _mm512_mask_sub_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_sub_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let sub = _mm512_sub_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, sub, src.as_i32x16())) @@ -1034,7 +1108,8 @@ pub fn _mm512_mask_sub_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubd))] -pub fn _mm512_maskz_sub_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_sub_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let sub = _mm512_sub_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, sub, i32x16::ZERO)) @@ -1048,7 +1123,8 @@ pub fn _mm512_maskz_sub_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubd))] -pub fn _mm256_mask_sub_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_sub_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let sub = _mm256_sub_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, sub, src.as_i32x8())) @@ -1062,7 +1138,8 @@ pub fn _mm256_mask_sub_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubd))] -pub fn _mm256_maskz_sub_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_sub_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let sub = _mm256_sub_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, sub, i32x8::ZERO)) @@ -1076,7 +1153,8 @@ pub fn _mm256_maskz_sub_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubd))] -pub fn _mm_mask_sub_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_sub_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let sub = _mm_sub_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, sub, src.as_i32x4())) @@ -1090,7 +1168,8 @@ pub fn _mm_mask_sub_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubd))] -pub fn _mm_maskz_sub_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const 
fn _mm_maskz_sub_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let sub = _mm_sub_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, sub, i32x4::ZERO)) @@ -1104,7 +1183,8 @@ pub fn _mm_maskz_sub_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubq))] -pub fn _mm512_sub_epi64(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_sub_epi64(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_sub(a.as_i64x8(), b.as_i64x8())) } } @@ -1115,7 +1195,8 @@ pub fn _mm512_sub_epi64(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubq))] -pub fn _mm512_mask_sub_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_sub_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let sub = _mm512_sub_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, sub, src.as_i64x8())) @@ -1129,7 +1210,8 @@ pub fn _mm512_mask_sub_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubq))] -pub fn _mm512_maskz_sub_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_sub_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let sub = _mm512_sub_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, sub, i64x8::ZERO)) @@ -1143,7 +1225,8 @@ pub fn _mm512_maskz_sub_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubq))] -pub fn _mm256_mask_sub_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_sub_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let sub = _mm256_sub_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, sub, src.as_i64x4())) @@ -1157,7 +1240,8 @@ pub fn _mm256_mask_sub_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubq))] -pub fn _mm256_maskz_sub_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_sub_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let sub = _mm256_sub_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, sub, i64x4::ZERO)) @@ -1171,7 +1255,8 @@ pub fn _mm256_maskz_sub_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubq))] -pub fn _mm_mask_sub_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_sub_epi64(src: __m128i, k: __mmask8, a: 
__m128i, b: __m128i) -> __m128i { unsafe { let sub = _mm_sub_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, sub, src.as_i64x2())) @@ -1185,7 +1270,8 @@ pub fn _mm_mask_sub_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsubq))] -pub fn _mm_maskz_sub_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_sub_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let sub = _mm_sub_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, sub, i64x2::ZERO)) @@ -1199,7 +1285,8 @@ pub fn _mm_maskz_sub_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubps))] -pub fn _mm512_sub_ps(a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_sub_ps(a: __m512, b: __m512) -> __m512 { unsafe { transmute(simd_sub(a.as_f32x16(), b.as_f32x16())) } } @@ -1210,7 +1297,8 @@ pub fn _mm512_sub_ps(a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubps))] -pub fn _mm512_mask_sub_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_sub_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let sub = _mm512_sub_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, sub, src.as_f32x16())) @@ -1224,7 +1312,8 @@ pub fn _mm512_mask_sub_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubps))] -pub fn _mm512_maskz_sub_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_sub_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let sub = _mm512_sub_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, sub, f32x16::ZERO)) @@ -1238,7 +1327,8 @@ pub fn _mm512_maskz_sub_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubps))] -pub fn _mm256_mask_sub_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_sub_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let sub = _mm256_sub_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, sub, src.as_f32x8())) @@ -1252,7 +1342,8 @@ pub fn _mm256_mask_sub_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubps))] -pub fn _mm256_maskz_sub_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_sub_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let sub = _mm256_sub_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, sub, f32x8::ZERO)) @@ 
-1266,7 +1357,8 @@ pub fn _mm256_maskz_sub_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubps))] -pub fn _mm_mask_sub_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_sub_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let sub = _mm_sub_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, sub, src.as_f32x4())) @@ -1280,7 +1372,8 @@ pub fn _mm_mask_sub_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubps))] -pub fn _mm_maskz_sub_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_sub_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let sub = _mm_sub_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, sub, f32x4::ZERO)) @@ -1294,7 +1387,8 @@ pub fn _mm_maskz_sub_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubpd))] -pub fn _mm512_sub_pd(a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_sub_pd(a: __m512d, b: __m512d) -> __m512d { unsafe { transmute(simd_sub(a.as_f64x8(), b.as_f64x8())) } } @@ -1305,7 +1399,8 @@ pub fn _mm512_sub_pd(a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubpd))] -pub fn _mm512_mask_sub_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_sub_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let sub = _mm512_sub_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, sub, src.as_f64x8())) @@ -1319,7 +1414,8 @@ pub fn _mm512_mask_sub_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubpd))] -pub fn _mm512_maskz_sub_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_sub_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let sub = _mm512_sub_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, sub, f64x8::ZERO)) @@ -1333,7 +1429,8 @@ pub fn _mm512_maskz_sub_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubpd))] -pub fn _mm256_mask_sub_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_sub_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let sub = _mm256_sub_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, sub, src.as_f64x4())) @@ -1347,7 +1444,8 @@ pub fn _mm256_mask_sub_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> #[target_feature(enable = 
"avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubpd))] -pub fn _mm256_maskz_sub_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_sub_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let sub = _mm256_sub_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, sub, f64x4::ZERO)) @@ -1361,7 +1459,8 @@ pub fn _mm256_maskz_sub_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubpd))] -pub fn _mm_mask_sub_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_sub_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let sub = _mm_sub_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, sub, src.as_f64x2())) @@ -1375,7 +1474,8 @@ pub fn _mm_mask_sub_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubpd))] -pub fn _mm_maskz_sub_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_sub_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let sub = _mm_sub_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, sub, f64x2::ZERO)) @@ -1389,7 +1489,8 @@ pub fn _mm_maskz_sub_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmuldq))] -pub fn _mm512_mul_epi32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mul_epi32(a: __m512i, b: __m512i) -> __m512i { unsafe { let a = simd_cast::<_, i64x8>(simd_cast::<_, i32x8>(a.as_i64x8())); let b = simd_cast::<_, i64x8>(simd_cast::<_, i32x8>(b.as_i64x8())); @@ -1404,7 +1505,8 @@ pub fn _mm512_mul_epi32(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmuldq))] -pub fn _mm512_mask_mul_epi32(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_mul_epi32(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let mul = _mm512_mul_epi32(a, b).as_i64x8(); transmute(simd_select_bitmask(k, mul, src.as_i64x8())) @@ -1418,7 +1520,8 @@ pub fn _mm512_mask_mul_epi32(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmuldq))] -pub fn _mm512_maskz_mul_epi32(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_mul_epi32(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let mul = _mm512_mul_epi32(a, b).as_i64x8(); transmute(simd_select_bitmask(k, mul, i64x8::ZERO)) @@ -1432,7 +1535,8 @@ pub fn _mm512_maskz_mul_epi32(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] 
#[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmuldq))] -pub fn _mm256_mask_mul_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_mul_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let mul = _mm256_mul_epi32(a, b).as_i64x4(); transmute(simd_select_bitmask(k, mul, src.as_i64x4())) @@ -1446,7 +1550,8 @@ pub fn _mm256_mask_mul_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmuldq))] -pub fn _mm256_maskz_mul_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_mul_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let mul = _mm256_mul_epi32(a, b).as_i64x4(); transmute(simd_select_bitmask(k, mul, i64x4::ZERO)) @@ -1460,7 +1565,8 @@ pub fn _mm256_maskz_mul_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmuldq))] -pub fn _mm_mask_mul_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mul_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let mul = _mm_mul_epi32(a, b).as_i64x2(); transmute(simd_select_bitmask(k, mul, src.as_i64x2())) @@ -1474,7 +1580,8 @@ pub fn _mm_mask_mul_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmuldq))] -pub fn _mm_maskz_mul_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_mul_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let mul = _mm_mul_epi32(a, b).as_i64x2(); transmute(simd_select_bitmask(k, mul, i64x2::ZERO)) @@ -1488,7 +1595,8 @@ pub fn _mm_maskz_mul_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulld))] -pub fn _mm512_mullo_epi32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mullo_epi32(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_mul(a.as_i32x16(), b.as_i32x16())) } } @@ -1499,7 +1607,13 @@ pub fn _mm512_mullo_epi32(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulld))] -pub fn _mm512_mask_mullo_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_mullo_epi32( + src: __m512i, + k: __mmask16, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { let mul = _mm512_mullo_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, mul, src.as_i32x16())) @@ -1513,7 +1627,8 @@ pub fn _mm512_mask_mullo_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512 #[target_feature(enable = "avx512f")] 
#[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulld))] -pub fn _mm512_maskz_mullo_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_mullo_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let mul = _mm512_mullo_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, mul, i32x16::ZERO)) @@ -1527,7 +1642,8 @@ pub fn _mm512_maskz_mullo_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulld))] -pub fn _mm256_mask_mullo_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_mullo_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let mul = _mm256_mullo_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, mul, src.as_i32x8())) @@ -1541,7 +1657,8 @@ pub fn _mm256_mask_mullo_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulld))] -pub fn _mm256_maskz_mullo_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_mullo_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let mul = _mm256_mullo_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, mul, i32x8::ZERO)) @@ -1555,7 +1672,8 @@ pub fn _mm256_maskz_mullo_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulld))] -pub fn _mm_mask_mullo_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mullo_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let mul = _mm_mullo_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, mul, src.as_i32x4())) @@ -1569,7 +1687,8 @@ pub fn _mm_mask_mullo_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) - #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmulld))] -pub fn _mm_maskz_mullo_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_mullo_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let mul = _mm_mullo_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, mul, i32x4::ZERO)) @@ -1584,7 +1703,8 @@ pub fn _mm_maskz_mullo_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mullox_epi64(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mullox_epi64(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_mul(a.as_i64x8(), b.as_i64x8())) } } @@ -1596,7 +1716,13 @@ pub fn _mm512_mullox_epi64(a: __m512i, b: __m512i) -> __m512i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = 
"stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_mullox_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_mullox_epi64( + src: __m512i, + k: __mmask8, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { let mul = _mm512_mullox_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, mul, src.as_i64x8())) @@ -1610,11 +1736,12 @@ pub fn _mm512_mask_mullox_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmuludq))] -pub fn _mm512_mul_epu32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mul_epu32(a: __m512i, b: __m512i) -> __m512i { unsafe { let a = a.as_u64x8(); let b = b.as_u64x8(); - let mask = u64x8::splat(u32::MAX.into()); + let mask = u64x8::splat(u32::MAX as u64); transmute(simd_mul(simd_and(a, mask), simd_and(b, mask))) } } @@ -1626,7 +1753,8 @@ pub fn _mm512_mul_epu32(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmuludq))] -pub fn _mm512_mask_mul_epu32(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_mul_epu32(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let mul = _mm512_mul_epu32(a, b).as_u64x8(); transmute(simd_select_bitmask(k, mul, src.as_u64x8())) @@ -1640,7 +1768,8 @@ pub fn _mm512_mask_mul_epu32(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmuludq))] -pub fn _mm512_maskz_mul_epu32(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_mul_epu32(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let mul = _mm512_mul_epu32(a, b).as_u64x8(); transmute(simd_select_bitmask(k, mul, u64x8::ZERO)) @@ -1654,7 +1783,8 @@ pub fn _mm512_maskz_mul_epu32(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmuludq))] -pub fn _mm256_mask_mul_epu32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_mul_epu32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let mul = _mm256_mul_epu32(a, b).as_u64x4(); transmute(simd_select_bitmask(k, mul, src.as_u64x4())) @@ -1668,7 +1798,8 @@ pub fn _mm256_mask_mul_epu32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmuludq))] -pub fn _mm256_maskz_mul_epu32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_mul_epu32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let mul = _mm256_mul_epu32(a, b).as_u64x4(); transmute(simd_select_bitmask(k, mul, u64x4::ZERO)) @@ -1682,7 +1813,8 @@ pub fn _mm256_maskz_mul_epu32(k: 
__mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmuludq))] -pub fn _mm_mask_mul_epu32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mul_epu32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let mul = _mm_mul_epu32(a, b).as_u64x2(); transmute(simd_select_bitmask(k, mul, src.as_u64x2())) @@ -1696,7 +1828,8 @@ pub fn _mm_mask_mul_epu32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmuludq))] -pub fn _mm_maskz_mul_epu32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_mul_epu32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let mul = _mm_mul_epu32(a, b).as_u64x2(); transmute(simd_select_bitmask(k, mul, u64x2::ZERO)) @@ -1710,7 +1843,8 @@ pub fn _mm_maskz_mul_epu32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulps))] -pub fn _mm512_mul_ps(a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mul_ps(a: __m512, b: __m512) -> __m512 { unsafe { transmute(simd_mul(a.as_f32x16(), b.as_f32x16())) } } @@ -1721,7 +1855,8 @@ pub fn _mm512_mul_ps(a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulps))] -pub fn _mm512_mask_mul_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_mul_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let mul = _mm512_mul_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, mul, src.as_f32x16())) @@ -1735,7 +1870,8 @@ pub fn _mm512_mask_mul_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulps))] -pub fn _mm512_maskz_mul_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_mul_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let mul = _mm512_mul_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, mul, f32x16::ZERO)) @@ -1749,7 +1885,8 @@ pub fn _mm512_maskz_mul_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulps))] -pub fn _mm256_mask_mul_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_mul_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let mul = _mm256_mul_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, mul, src.as_f32x8())) @@ -1763,7 +1900,8 @@ pub fn _mm256_mask_mul_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m #[target_feature(enable = "avx512f,avx512vl")] 
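Throughout these mask/maskz pairs the selection step is the same; a minimal scalar sketch of what `simd_select_bitmask` contributes (hypothetical helper name, assuming the usual AVX-512 write-mask convention) is:

    // Merge masking (`_mm*_mask_*`): lanes whose bit in `k` is clear keep `src`.
    // Zero masking (`_mm*_maskz_*`): the same, with `src` replaced by all zeros.
    fn mask_select<const N: usize>(k: u16, computed: [f32; N], src: [f32; N]) -> [f32; N] {
        let mut out = src;
        for i in 0..N {
            if (k >> i) & 1 == 1 {
                out[i] = computed[i];
            }
        }
        out
    }
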
#[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulps))] -pub fn _mm256_maskz_mul_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_mul_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let mul = _mm256_mul_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, mul, f32x8::ZERO)) @@ -1777,7 +1915,8 @@ pub fn _mm256_maskz_mul_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulps))] -pub fn _mm_mask_mul_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mul_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let mul = _mm_mul_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, mul, src.as_f32x4())) @@ -1791,7 +1930,8 @@ pub fn _mm_mask_mul_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulps))] -pub fn _mm_maskz_mul_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_mul_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let mul = _mm_mul_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, mul, f32x4::ZERO)) @@ -1805,7 +1945,8 @@ pub fn _mm_maskz_mul_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulpd))] -pub fn _mm512_mul_pd(a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mul_pd(a: __m512d, b: __m512d) -> __m512d { unsafe { transmute(simd_mul(a.as_f64x8(), b.as_f64x8())) } } @@ -1816,7 +1957,8 @@ pub fn _mm512_mul_pd(a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulpd))] -pub fn _mm512_mask_mul_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_mul_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let mul = _mm512_mul_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, mul, src.as_f64x8())) @@ -1830,7 +1972,8 @@ pub fn _mm512_mask_mul_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulpd))] -pub fn _mm512_maskz_mul_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_mul_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let mul = _mm512_mul_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, mul, f64x8::ZERO)) @@ -1844,7 +1987,8 @@ pub fn _mm512_maskz_mul_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulpd))] -pub fn _mm256_mask_mul_pd(src: __m256d, k: __mmask8, a: __m256d, 
b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_mul_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let mul = _mm256_mul_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, mul, src.as_f64x4())) @@ -1858,7 +2002,8 @@ pub fn _mm256_mask_mul_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulpd))] -pub fn _mm256_maskz_mul_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_mul_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let mul = _mm256_mul_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, mul, f64x4::ZERO)) @@ -1872,7 +2017,8 @@ pub fn _mm256_maskz_mul_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulpd))] -pub fn _mm_mask_mul_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mul_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let mul = _mm_mul_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, mul, src.as_f64x2())) @@ -1886,7 +2032,8 @@ pub fn _mm_mask_mul_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulpd))] -pub fn _mm_maskz_mul_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_mul_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let mul = _mm_mul_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, mul, f64x2::ZERO)) @@ -1900,7 +2047,8 @@ pub fn _mm_maskz_mul_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivps))] -pub fn _mm512_div_ps(a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_div_ps(a: __m512, b: __m512) -> __m512 { unsafe { transmute(simd_div(a.as_f32x16(), b.as_f32x16())) } } @@ -1911,7 +2059,8 @@ pub fn _mm512_div_ps(a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivps))] -pub fn _mm512_mask_div_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_div_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let div = _mm512_div_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, div, src.as_f32x16())) @@ -1925,7 +2074,8 @@ pub fn _mm512_mask_div_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivps))] -pub fn _mm512_maskz_div_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn 
_mm512_maskz_div_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let div = _mm512_div_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, div, f32x16::ZERO)) @@ -1939,7 +2089,8 @@ pub fn _mm512_maskz_div_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivps))] -pub fn _mm256_mask_div_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_div_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let div = _mm256_div_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, div, src.as_f32x8())) @@ -1953,7 +2104,8 @@ pub fn _mm256_mask_div_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivps))] -pub fn _mm256_maskz_div_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_div_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let div = _mm256_div_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, div, f32x8::ZERO)) @@ -1967,7 +2119,8 @@ pub fn _mm256_maskz_div_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivps))] -pub fn _mm_mask_div_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_div_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let div = _mm_div_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, div, src.as_f32x4())) @@ -1981,7 +2134,8 @@ pub fn _mm_mask_div_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivps))] -pub fn _mm_maskz_div_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_div_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let div = _mm_div_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, div, f32x4::ZERO)) @@ -1995,7 +2149,8 @@ pub fn _mm_maskz_div_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivpd))] -pub fn _mm512_div_pd(a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_div_pd(a: __m512d, b: __m512d) -> __m512d { unsafe { transmute(simd_div(a.as_f64x8(), b.as_f64x8())) } } @@ -2006,7 +2161,8 @@ pub fn _mm512_div_pd(a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivpd))] -pub fn _mm512_mask_div_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_div_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let div = _mm512_div_pd(a, b).as_f64x8(); 
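// merge masking: lanes whose bit in `k` is set take the freshly computed
// quotient, the remaining lanes keep the corresponding lane of `src`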
transmute(simd_select_bitmask(k, div, src.as_f64x8())) @@ -2020,7 +2176,8 @@ pub fn _mm512_mask_div_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivpd))] -pub fn _mm512_maskz_div_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_div_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let div = _mm512_div_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, div, f64x8::ZERO)) @@ -2034,7 +2191,8 @@ pub fn _mm512_maskz_div_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivpd))] -pub fn _mm256_mask_div_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_div_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let div = _mm256_div_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, div, src.as_f64x4())) @@ -2048,7 +2206,8 @@ pub fn _mm256_mask_div_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivpd))] -pub fn _mm256_maskz_div_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_div_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let div = _mm256_div_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, div, f64x4::ZERO)) @@ -2062,7 +2221,8 @@ pub fn _mm256_maskz_div_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivpd))] -pub fn _mm_mask_div_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_div_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let div = _mm_div_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, div, src.as_f64x2())) @@ -2076,7 +2236,8 @@ pub fn _mm_mask_div_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivpd))] -pub fn _mm_maskz_div_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_div_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let div = _mm_div_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, div, f64x2::ZERO)) @@ -2090,7 +2251,8 @@ pub fn _mm_maskz_div_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsd))] -pub fn _mm512_max_epi32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_max_epi32(a: __m512i, b: __m512i) -> __m512i { unsafe { simd_imax(a.as_i32x16(), b.as_i32x16()).as_m512i() } } @@ -2101,7 +2263,8 @@ pub fn 
_mm512_max_epi32(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsd))] -pub fn _mm512_mask_max_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_max_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let max = _mm512_max_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, max, src.as_i32x16())) @@ -2115,7 +2278,8 @@ pub fn _mm512_mask_max_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsd))] -pub fn _mm512_maskz_max_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_max_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let max = _mm512_max_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, max, i32x16::ZERO)) @@ -2129,7 +2293,8 @@ pub fn _mm512_maskz_max_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsd))] -pub fn _mm256_mask_max_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_max_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let max = _mm256_max_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, max, src.as_i32x8())) @@ -2143,7 +2308,8 @@ pub fn _mm256_mask_max_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsd))] -pub fn _mm256_maskz_max_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_max_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let max = _mm256_max_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, max, i32x8::ZERO)) @@ -2157,7 +2323,8 @@ pub fn _mm256_maskz_max_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsd))] -pub fn _mm_mask_max_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_max_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let max = _mm_max_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, max, src.as_i32x4())) @@ -2171,7 +2338,8 @@ pub fn _mm_mask_max_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsd))] -pub fn _mm_maskz_max_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_max_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let max = _mm_max_epi32(a, b).as_i32x4(); 
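// merge masking: keep `src`'s lane wherever the corresponding bit of `k` is clear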
transmute(simd_select_bitmask(k, max, i32x4::ZERO)) @@ -2185,7 +2353,8 @@ pub fn _mm_maskz_max_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsq))] -pub fn _mm512_max_epi64(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_max_epi64(a: __m512i, b: __m512i) -> __m512i { unsafe { simd_imax(a.as_i64x8(), b.as_i64x8()).as_m512i() } } @@ -2196,7 +2365,8 @@ pub fn _mm512_max_epi64(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsq))] -pub fn _mm512_mask_max_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_max_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let max = _mm512_max_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, max, src.as_i64x8())) @@ -2210,7 +2380,8 @@ pub fn _mm512_mask_max_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsq))] -pub fn _mm512_maskz_max_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_max_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let max = _mm512_max_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, max, i64x8::ZERO)) @@ -2224,7 +2395,8 @@ pub fn _mm512_maskz_max_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsq))] -pub fn _mm256_max_epi64(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_max_epi64(a: __m256i, b: __m256i) -> __m256i { unsafe { simd_imax(a.as_i64x4(), b.as_i64x4()).as_m256i() } } @@ -2235,7 +2407,8 @@ pub fn _mm256_max_epi64(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsq))] -pub fn _mm256_mask_max_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_max_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let max = _mm256_max_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, max, src.as_i64x4())) @@ -2249,7 +2422,8 @@ pub fn _mm256_mask_max_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsq))] -pub fn _mm256_maskz_max_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_max_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let max = _mm256_max_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, max, i64x4::ZERO)) @@ -2263,7 +2437,8 @@ pub fn _mm256_maskz_max_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i 
{ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsq))] -pub fn _mm_max_epi64(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_max_epi64(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_imax(a.as_i64x2(), b.as_i64x2()).as_m128i() } } @@ -2274,7 +2449,8 @@ pub fn _mm_max_epi64(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsq))] -pub fn _mm_mask_max_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_max_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let max = _mm_max_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, max, src.as_i64x2())) @@ -2288,7 +2464,8 @@ pub fn _mm_mask_max_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxsq))] -pub fn _mm_maskz_max_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_max_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let max = _mm_max_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, max, i64x2::ZERO)) @@ -2498,7 +2675,8 @@ pub fn _mm_maskz_max_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxud))] -pub fn _mm512_max_epu32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_max_epu32(a: __m512i, b: __m512i) -> __m512i { unsafe { simd_imax(a.as_u32x16(), b.as_u32x16()).as_m512i() } } @@ -2509,7 +2687,8 @@ pub fn _mm512_max_epu32(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxud))] -pub fn _mm512_mask_max_epu32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_max_epu32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let max = _mm512_max_epu32(a, b).as_u32x16(); transmute(simd_select_bitmask(k, max, src.as_u32x16())) @@ -2523,7 +2702,8 @@ pub fn _mm512_mask_max_epu32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxud))] -pub fn _mm512_maskz_max_epu32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_max_epu32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let max = _mm512_max_epu32(a, b).as_u32x16(); transmute(simd_select_bitmask(k, max, u32x16::ZERO)) @@ -2537,7 +2717,8 @@ pub fn _mm512_maskz_max_epu32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxud))] -pub fn 
_mm256_mask_max_epu32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_max_epu32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let max = _mm256_max_epu32(a, b).as_u32x8(); transmute(simd_select_bitmask(k, max, src.as_u32x8())) @@ -2551,7 +2732,8 @@ pub fn _mm256_mask_max_epu32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxud))] -pub fn _mm256_maskz_max_epu32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_max_epu32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let max = _mm256_max_epu32(a, b).as_u32x8(); transmute(simd_select_bitmask(k, max, u32x8::ZERO)) @@ -2565,7 +2747,8 @@ pub fn _mm256_maskz_max_epu32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxud))] -pub fn _mm_mask_max_epu32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_max_epu32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let max = _mm_max_epu32(a, b).as_u32x4(); transmute(simd_select_bitmask(k, max, src.as_u32x4())) @@ -2579,7 +2762,8 @@ pub fn _mm_mask_max_epu32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxud))] -pub fn _mm_maskz_max_epu32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_max_epu32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let max = _mm_max_epu32(a, b).as_u32x4(); transmute(simd_select_bitmask(k, max, u32x4::ZERO)) @@ -2593,7 +2777,8 @@ pub fn _mm_maskz_max_epu32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxuq))] -pub fn _mm512_max_epu64(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_max_epu64(a: __m512i, b: __m512i) -> __m512i { unsafe { simd_imax(a.as_u64x8(), b.as_u64x8()).as_m512i() } } @@ -2604,7 +2789,8 @@ pub fn _mm512_max_epu64(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxuq))] -pub fn _mm512_mask_max_epu64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_max_epu64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let max = _mm512_max_epu64(a, b).as_u64x8(); transmute(simd_select_bitmask(k, max, src.as_u64x8())) @@ -2618,7 +2804,8 @@ pub fn _mm512_mask_max_epu64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxuq))] -pub fn _mm512_maskz_max_epu64(k: __mmask8, a: 
__m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_max_epu64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let max = _mm512_max_epu64(a, b).as_u64x8(); transmute(simd_select_bitmask(k, max, u64x8::ZERO)) @@ -2632,7 +2819,8 @@ pub fn _mm512_maskz_max_epu64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxuq))] -pub fn _mm256_max_epu64(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_max_epu64(a: __m256i, b: __m256i) -> __m256i { unsafe { simd_imax(a.as_u64x4(), b.as_u64x4()).as_m256i() } } @@ -2643,7 +2831,8 @@ pub fn _mm256_max_epu64(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxuq))] -pub fn _mm256_mask_max_epu64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_max_epu64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let max = _mm256_max_epu64(a, b).as_u64x4(); transmute(simd_select_bitmask(k, max, src.as_u64x4())) @@ -2657,7 +2846,8 @@ pub fn _mm256_mask_max_epu64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxuq))] -pub fn _mm256_maskz_max_epu64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_max_epu64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let max = _mm256_max_epu64(a, b).as_u64x4(); transmute(simd_select_bitmask(k, max, u64x4::ZERO)) @@ -2671,7 +2861,8 @@ pub fn _mm256_maskz_max_epu64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxuq))] -pub fn _mm_max_epu64(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_max_epu64(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_imax(a.as_u64x2(), b.as_u64x2()).as_m128i() } } @@ -2682,7 +2873,8 @@ pub fn _mm_max_epu64(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxuq))] -pub fn _mm_mask_max_epu64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_max_epu64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let max = _mm_max_epu64(a, b).as_u64x2(); transmute(simd_select_bitmask(k, max, src.as_u64x2())) @@ -2696,7 +2888,8 @@ pub fn _mm_mask_max_epu64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmaxuq))] -pub fn _mm_maskz_max_epu64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn 
_mm_maskz_max_epu64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let max = _mm_max_epu64(a, b).as_u64x2(); transmute(simd_select_bitmask(k, max, u64x2::ZERO)) @@ -2710,7 +2903,8 @@ pub fn _mm_maskz_max_epu64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsd))] -pub fn _mm512_min_epi32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_min_epi32(a: __m512i, b: __m512i) -> __m512i { unsafe { simd_imin(a.as_i32x16(), b.as_i32x16()).as_m512i() } } @@ -2721,7 +2915,8 @@ pub fn _mm512_min_epi32(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsd))] -pub fn _mm512_mask_min_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_min_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let min = _mm512_min_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, min, src.as_i32x16())) @@ -2735,7 +2930,8 @@ pub fn _mm512_mask_min_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsd))] -pub fn _mm512_maskz_min_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_min_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let min = _mm512_min_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, min, i32x16::ZERO)) @@ -2749,7 +2945,8 @@ pub fn _mm512_maskz_min_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsd))] -pub fn _mm256_mask_min_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_min_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let min = _mm256_min_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, min, src.as_i32x8())) @@ -2763,7 +2960,8 @@ pub fn _mm256_mask_min_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsd))] -pub fn _mm256_maskz_min_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_min_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let min = _mm256_min_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, min, i32x8::ZERO)) @@ -2777,7 +2975,8 @@ pub fn _mm256_maskz_min_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsd))] -pub fn _mm_mask_min_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_min_epi32(src: __m128i, k: 
__mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let min = _mm_min_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, min, src.as_i32x4())) @@ -2791,7 +2990,8 @@ pub fn _mm_mask_min_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsd))] -pub fn _mm_maskz_min_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_min_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let min = _mm_min_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, min, i32x4::ZERO)) @@ -2805,7 +3005,8 @@ pub fn _mm_maskz_min_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsq))] -pub fn _mm512_min_epi64(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_min_epi64(a: __m512i, b: __m512i) -> __m512i { unsafe { simd_imin(a.as_i64x8(), b.as_i64x8()).as_m512i() } } @@ -2816,7 +3017,8 @@ pub fn _mm512_min_epi64(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsq))] -pub fn _mm512_mask_min_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_min_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let min = _mm512_min_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, min, src.as_i64x8())) @@ -2830,7 +3032,8 @@ pub fn _mm512_mask_min_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsq))] -pub fn _mm512_maskz_min_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_min_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let min = _mm512_min_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, min, i64x8::ZERO)) @@ -2844,7 +3047,8 @@ pub fn _mm512_maskz_min_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsq))] -pub fn _mm256_min_epi64(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_min_epi64(a: __m256i, b: __m256i) -> __m256i { unsafe { simd_imin(a.as_i64x4(), b.as_i64x4()).as_m256i() } } @@ -2855,7 +3059,8 @@ pub fn _mm256_min_epi64(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsq))] -pub fn _mm256_mask_min_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_min_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let min = _mm256_min_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, min, src.as_i64x4())) @@ 
-2869,7 +3074,8 @@ pub fn _mm256_mask_min_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsq))] -pub fn _mm256_maskz_min_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_min_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let min = _mm256_min_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, min, i64x4::ZERO)) @@ -2883,7 +3089,8 @@ pub fn _mm256_maskz_min_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsq))] -pub fn _mm_min_epi64(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_min_epi64(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_imin(a.as_i64x2(), b.as_i64x2()).as_m128i() } } @@ -2894,7 +3101,8 @@ pub fn _mm_min_epi64(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsq))] -pub fn _mm_mask_min_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_min_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let min = _mm_min_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, min, src.as_i64x2())) @@ -2908,7 +3116,8 @@ pub fn _mm_mask_min_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminsq))] -pub fn _mm_maskz_min_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_min_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let min = _mm_min_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, min, i64x2::ZERO)) @@ -3118,7 +3327,8 @@ pub fn _mm_maskz_min_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminud))] -pub fn _mm512_min_epu32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_min_epu32(a: __m512i, b: __m512i) -> __m512i { unsafe { simd_imin(a.as_u32x16(), b.as_u32x16()).as_m512i() } } @@ -3129,7 +3339,8 @@ pub fn _mm512_min_epu32(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminud))] -pub fn _mm512_mask_min_epu32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_min_epu32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let min = _mm512_min_epu32(a, b).as_u32x16(); transmute(simd_select_bitmask(k, min, src.as_u32x16())) @@ -3143,7 +3354,8 @@ pub fn _mm512_mask_min_epu32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = 
"stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminud))] -pub fn _mm512_maskz_min_epu32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_min_epu32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let min = _mm512_min_epu32(a, b).as_u32x16(); transmute(simd_select_bitmask(k, min, u32x16::ZERO)) @@ -3157,7 +3369,8 @@ pub fn _mm512_maskz_min_epu32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminud))] -pub fn _mm256_mask_min_epu32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_min_epu32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let min = _mm256_min_epu32(a, b).as_u32x8(); transmute(simd_select_bitmask(k, min, src.as_u32x8())) @@ -3171,7 +3384,8 @@ pub fn _mm256_mask_min_epu32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminud))] -pub fn _mm256_maskz_min_epu32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_min_epu32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let min = _mm256_min_epu32(a, b).as_u32x8(); transmute(simd_select_bitmask(k, min, u32x8::ZERO)) @@ -3185,7 +3399,8 @@ pub fn _mm256_maskz_min_epu32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminud))] -pub fn _mm_mask_min_epu32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_min_epu32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let min = _mm_min_epu32(a, b).as_u32x4(); transmute(simd_select_bitmask(k, min, src.as_u32x4())) @@ -3199,7 +3414,8 @@ pub fn _mm_mask_min_epu32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminud))] -pub fn _mm_maskz_min_epu32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_min_epu32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let min = _mm_min_epu32(a, b).as_u32x4(); transmute(simd_select_bitmask(k, min, u32x4::ZERO)) @@ -3213,7 +3429,8 @@ pub fn _mm_maskz_min_epu32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminuq))] -pub fn _mm512_min_epu64(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_min_epu64(a: __m512i, b: __m512i) -> __m512i { unsafe { simd_imin(a.as_u64x8(), b.as_u64x8()).as_m512i() } } @@ -3224,7 +3441,8 @@ pub fn _mm512_min_epu64(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, 
assert_instr(vpminuq))] -pub fn _mm512_mask_min_epu64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_min_epu64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let min = _mm512_min_epu64(a, b).as_u64x8(); transmute(simd_select_bitmask(k, min, src.as_u64x8())) @@ -3238,7 +3456,8 @@ pub fn _mm512_mask_min_epu64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminuq))] -pub fn _mm512_maskz_min_epu64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_min_epu64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let min = _mm512_min_epu64(a, b).as_u64x8(); transmute(simd_select_bitmask(k, min, u64x8::ZERO)) @@ -3252,7 +3471,8 @@ pub fn _mm512_maskz_min_epu64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminuq))] -pub fn _mm256_min_epu64(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_min_epu64(a: __m256i, b: __m256i) -> __m256i { unsafe { simd_imin(a.as_u64x4(), b.as_u64x4()).as_m256i() } } @@ -3263,7 +3483,8 @@ pub fn _mm256_min_epu64(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminuq))] -pub fn _mm256_mask_min_epu64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_min_epu64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let min = _mm256_min_epu64(a, b).as_u64x4(); transmute(simd_select_bitmask(k, min, src.as_u64x4())) @@ -3277,7 +3498,8 @@ pub fn _mm256_mask_min_epu64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminuq))] -pub fn _mm256_maskz_min_epu64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_min_epu64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let min = _mm256_min_epu64(a, b).as_u64x4(); transmute(simd_select_bitmask(k, min, u64x4::ZERO)) @@ -3291,7 +3513,8 @@ pub fn _mm256_maskz_min_epu64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminuq))] -pub fn _mm_min_epu64(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_min_epu64(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_imin(a.as_u64x2(), b.as_u64x2()).as_m128i() } } @@ -3302,7 +3525,8 @@ pub fn _mm_min_epu64(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminuq))] -pub fn _mm_mask_min_epu64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_min_epu64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let min = _mm_min_epu64(a, b).as_u64x2(); transmute(simd_select_bitmask(k, min, src.as_u64x2())) @@ -3316,7 +3540,8 @@ pub fn _mm_mask_min_epu64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpminuq))] -pub fn _mm_maskz_min_epu64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_min_epu64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let min = _mm_min_epu64(a, b).as_u64x2(); transmute(simd_select_bitmask(k, min, u64x2::ZERO)) @@ -3484,7 +3709,8 @@ pub fn _mm_maskz_sqrt_pd(k: __mmask8, a: __m128d) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps -pub fn _mm512_fmadd_ps(a: __m512, b: __m512, c: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_fmadd_ps(a: __m512, b: __m512, c: __m512) -> __m512 { unsafe { simd_fma(a, b, c) } } @@ -3495,7 +3721,8 @@ pub fn _mm512_fmadd_ps(a: __m512, b: __m512, c: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps -pub fn _mm512_mask_fmadd_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_fmadd_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __m512 { unsafe { simd_select_bitmask(k, _mm512_fmadd_ps(a, b, c), a) } } @@ -3506,7 +3733,8 @@ pub fn _mm512_mask_fmadd_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps -pub fn _mm512_maskz_fmadd_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_fmadd_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> __m512 { unsafe { simd_select_bitmask(k, _mm512_fmadd_ps(a, b, c), _mm512_setzero_ps()) } } @@ -3517,7 +3745,8 @@ pub fn _mm512_maskz_fmadd_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> _ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps -pub fn _mm512_mask3_fmadd_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask3_fmadd_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> __m512 { unsafe { simd_select_bitmask(k, _mm512_fmadd_ps(a, b, c), c) } } @@ -3528,7 +3757,8 @@ pub fn _mm512_mask3_fmadd_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps -pub fn _mm256_mask_fmadd_ps(a: __m256, k: __mmask8, b: __m256, 
c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_fmadd_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m256 { unsafe { simd_select_bitmask(k, _mm256_fmadd_ps(a, b, c), a) } } @@ -3539,7 +3769,8 @@ pub fn _mm256_mask_fmadd_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps -pub fn _mm256_maskz_fmadd_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_fmadd_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> __m256 { unsafe { simd_select_bitmask(k, _mm256_fmadd_ps(a, b, c), _mm256_setzero_ps()) } } @@ -3550,7 +3781,8 @@ pub fn _mm256_maskz_fmadd_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps -pub fn _mm256_mask3_fmadd_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask3_fmadd_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256 { unsafe { simd_select_bitmask(k, _mm256_fmadd_ps(a, b, c), c) } } @@ -3561,7 +3793,8 @@ pub fn _mm256_mask3_fmadd_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps -pub fn _mm_mask_fmadd_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fmadd_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { unsafe { simd_select_bitmask(k, _mm_fmadd_ps(a, b, c), a) } } @@ -3572,7 +3805,8 @@ pub fn _mm_mask_fmadd_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps -pub fn _mm_maskz_fmadd_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fmadd_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { simd_select_bitmask(k, _mm_fmadd_ps(a, b, c), _mm_setzero_ps()) } } @@ -3583,7 +3817,8 @@ pub fn _mm_maskz_fmadd_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m12 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps -pub fn _mm_mask3_fmadd_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fmadd_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { unsafe { simd_select_bitmask(k, _mm_fmadd_ps(a, b, c), c) } } @@ -3594,7 +3829,8 @@ pub fn _mm_mask3_fmadd_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m12 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, 
assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd -pub fn _mm512_fmadd_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_fmadd_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { unsafe { simd_fma(a, b, c) } } @@ -3605,7 +3841,8 @@ pub fn _mm512_fmadd_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd -pub fn _mm512_mask_fmadd_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_fmadd_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d { unsafe { simd_select_bitmask(k, _mm512_fmadd_pd(a, b, c), a) } } @@ -3616,7 +3853,8 @@ pub fn _mm512_mask_fmadd_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd -pub fn _mm512_maskz_fmadd_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_fmadd_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> __m512d { unsafe { simd_select_bitmask(k, _mm512_fmadd_pd(a, b, c), _mm512_setzero_pd()) } } @@ -3627,7 +3865,8 @@ pub fn _mm512_maskz_fmadd_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd -pub fn _mm512_mask3_fmadd_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask3_fmadd_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d { unsafe { simd_select_bitmask(k, _mm512_fmadd_pd(a, b, c), c) } } @@ -3638,7 +3877,8 @@ pub fn _mm512_mask3_fmadd_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd -pub fn _mm256_mask_fmadd_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_fmadd_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> __m256d { unsafe { simd_select_bitmask(k, _mm256_fmadd_pd(a, b, c), a) } } @@ -3649,7 +3889,8 @@ pub fn _mm256_mask_fmadd_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd -pub fn _mm256_maskz_fmadd_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_fmadd_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> __m256d { unsafe { simd_select_bitmask(k, _mm256_fmadd_pd(a, b, c), _mm256_setzero_pd()) } } @@ -3660,7 +3901,8 @@ pub fn _mm256_maskz_fmadd_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> #[target_feature(enable = 
"avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd -pub fn _mm256_mask3_fmadd_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask3_fmadd_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> __m256d { unsafe { simd_select_bitmask(k, _mm256_fmadd_pd(a, b, c), c) } } @@ -3671,7 +3913,8 @@ pub fn _mm256_mask3_fmadd_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd -pub fn _mm_mask_fmadd_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fmadd_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_select_bitmask(k, _mm_fmadd_pd(a, b, c), a) } } @@ -3682,7 +3925,8 @@ pub fn _mm_mask_fmadd_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd -pub fn _mm_maskz_fmadd_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fmadd_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_select_bitmask(k, _mm_fmadd_pd(a, b, c), _mm_setzero_pd()) } } @@ -3693,7 +3937,8 @@ pub fn _mm_maskz_fmadd_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd -pub fn _mm_mask3_fmadd_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fmadd_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { unsafe { simd_select_bitmask(k, _mm_fmadd_pd(a, b, c), c) } } @@ -3704,7 +3949,8 @@ pub fn _mm_mask3_fmadd_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub -pub fn _mm512_fmsub_ps(a: __m512, b: __m512, c: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_fmsub_ps(a: __m512, b: __m512, c: __m512) -> __m512 { unsafe { simd_fma(a, b, simd_neg(c)) } } @@ -3715,7 +3961,8 @@ pub fn _mm512_fmsub_ps(a: __m512, b: __m512, c: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub -pub fn _mm512_mask_fmsub_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_fmsub_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __m512 { unsafe { simd_select_bitmask(k, _mm512_fmsub_ps(a, b, c), 
a) } } @@ -3726,7 +3973,8 @@ pub fn _mm512_mask_fmsub_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub -pub fn _mm512_maskz_fmsub_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_fmsub_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> __m512 { unsafe { simd_select_bitmask(k, _mm512_fmsub_ps(a, b, c), _mm512_setzero_ps()) } } @@ -3737,7 +3985,8 @@ pub fn _mm512_maskz_fmsub_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> _ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub -pub fn _mm512_mask3_fmsub_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask3_fmsub_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> __m512 { unsafe { simd_select_bitmask(k, _mm512_fmsub_ps(a, b, c), c) } } @@ -3748,7 +3997,8 @@ pub fn _mm512_mask3_fmsub_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub -pub fn _mm256_mask_fmsub_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_fmsub_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m256 { unsafe { simd_select_bitmask(k, _mm256_fmsub_ps(a, b, c), a) } } @@ -3759,7 +4009,8 @@ pub fn _mm256_mask_fmsub_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub -pub fn _mm256_maskz_fmsub_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_fmsub_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> __m256 { unsafe { simd_select_bitmask(k, _mm256_fmsub_ps(a, b, c), _mm256_setzero_ps()) } } @@ -3770,7 +4021,8 @@ pub fn _mm256_maskz_fmsub_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub -pub fn _mm256_mask3_fmsub_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask3_fmsub_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256 { unsafe { simd_select_bitmask(k, _mm256_fmsub_ps(a, b, c), c) } } @@ -3781,7 +4033,8 @@ pub fn _mm256_mask3_fmsub_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, 
assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub -pub fn _mm_mask_fmsub_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fmsub_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { unsafe { simd_select_bitmask(k, _mm_fmsub_ps(a, b, c), a) } } @@ -3792,7 +4045,8 @@ pub fn _mm_mask_fmsub_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub -pub fn _mm_maskz_fmsub_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fmsub_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { simd_select_bitmask(k, _mm_fmsub_ps(a, b, c), _mm_setzero_ps()) } } @@ -3803,7 +4057,8 @@ pub fn _mm_maskz_fmsub_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m12 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub -pub fn _mm_mask3_fmsub_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fmsub_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { unsafe { simd_select_bitmask(k, _mm_fmsub_ps(a, b, c), c) } } @@ -3814,7 +4069,8 @@ pub fn _mm_mask3_fmsub_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m12 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub -pub fn _mm512_fmsub_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_fmsub_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { unsafe { simd_fma(a, b, simd_neg(c)) } } @@ -3825,7 +4081,8 @@ pub fn _mm512_fmsub_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub -pub fn _mm512_mask_fmsub_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_fmsub_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d { unsafe { simd_select_bitmask(k, _mm512_fmsub_pd(a, b, c), a) } } @@ -3836,7 +4093,8 @@ pub fn _mm512_mask_fmsub_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. 
clang fmadd, gcc fmsub -pub fn _mm512_maskz_fmsub_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_fmsub_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> __m512d { unsafe { simd_select_bitmask(k, _mm512_fmsub_pd(a, b, c), _mm512_setzero_pd()) } } @@ -3847,7 +4105,8 @@ pub fn _mm512_maskz_fmsub_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub -pub fn _mm512_mask3_fmsub_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask3_fmsub_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d { unsafe { simd_select_bitmask(k, _mm512_fmsub_pd(a, b, c), c) } } @@ -3858,7 +4117,8 @@ pub fn _mm512_mask3_fmsub_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub -pub fn _mm256_mask_fmsub_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_fmsub_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> __m256d { unsafe { simd_select_bitmask(k, _mm256_fmsub_pd(a, b, c), a) } } @@ -3869,7 +4129,8 @@ pub fn _mm256_mask_fmsub_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub -pub fn _mm256_maskz_fmsub_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_fmsub_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> __m256d { unsafe { simd_select_bitmask(k, _mm256_fmsub_pd(a, b, c), _mm256_setzero_pd()) } } @@ -3880,7 +4141,8 @@ pub fn _mm256_maskz_fmsub_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub -pub fn _mm256_mask3_fmsub_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask3_fmsub_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> __m256d { unsafe { simd_select_bitmask(k, _mm256_fmsub_pd(a, b, c), c) } } @@ -3891,7 +4153,8 @@ pub fn _mm256_mask3_fmsub_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. 
clang fmadd, gcc fmsub -pub fn _mm_mask_fmsub_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fmsub_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_select_bitmask(k, _mm_fmsub_pd(a, b, c), a) } } @@ -3902,7 +4165,8 @@ pub fn _mm_mask_fmsub_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub -pub fn _mm_maskz_fmsub_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fmsub_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_select_bitmask(k, _mm_fmsub_pd(a, b, c), _mm_setzero_pd()) } } @@ -3913,7 +4177,8 @@ pub fn _mm_maskz_fmsub_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub -pub fn _mm_mask3_fmsub_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fmsub_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { unsafe { simd_select_bitmask(k, _mm_fmsub_pd(a, b, c), c) } } @@ -3924,7 +4189,8 @@ pub fn _mm_mask3_fmsub_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps -pub fn _mm512_fmaddsub_ps(a: __m512, b: __m512, c: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_fmaddsub_ps(a: __m512, b: __m512, c: __m512) -> __m512 { unsafe { let add = simd_fma(a, b, c); let sub = simd_fma(a, b, simd_neg(c)); @@ -3943,7 +4209,8 @@ pub fn _mm512_fmaddsub_ps(a: __m512, b: __m512, c: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps -pub fn _mm512_mask_fmaddsub_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_fmaddsub_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __m512 { unsafe { simd_select_bitmask(k, _mm512_fmaddsub_ps(a, b, c), a) } } @@ -3954,7 +4221,8 @@ pub fn _mm512_mask_fmaddsub_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps -pub fn _mm512_maskz_fmaddsub_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_fmaddsub_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> __m512 { unsafe { simd_select_bitmask(k, _mm512_fmaddsub_ps(a, b, c), _mm512_setzero_ps()) } } @@ -3965,7 +4233,8 @@ pub fn _mm512_maskz_fmaddsub_ps(k: __mmask16, a: 
__m512, b: __m512, c: __m512) - #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps -pub fn _mm512_mask3_fmaddsub_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask3_fmaddsub_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> __m512 { unsafe { simd_select_bitmask(k, _mm512_fmaddsub_ps(a, b, c), c) } } @@ -3976,7 +4245,8 @@ pub fn _mm512_mask3_fmaddsub_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) - #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps -pub fn _mm256_mask_fmaddsub_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_fmaddsub_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m256 { unsafe { simd_select_bitmask(k, _mm256_fmaddsub_ps(a, b, c), a) } } @@ -3987,7 +4257,8 @@ pub fn _mm256_mask_fmaddsub_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps -pub fn _mm256_maskz_fmaddsub_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_fmaddsub_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> __m256 { unsafe { simd_select_bitmask(k, _mm256_fmaddsub_ps(a, b, c), _mm256_setzero_ps()) } } @@ -3998,7 +4269,8 @@ pub fn _mm256_maskz_fmaddsub_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps -pub fn _mm256_mask3_fmaddsub_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask3_fmaddsub_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256 { unsafe { simd_select_bitmask(k, _mm256_fmaddsub_ps(a, b, c), c) } } @@ -4009,7 +4281,8 @@ pub fn _mm256_mask3_fmaddsub_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps -pub fn _mm_mask_fmaddsub_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fmaddsub_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { unsafe { simd_select_bitmask(k, _mm_fmaddsub_ps(a, b, c), a) } } @@ -4020,7 +4293,8 @@ pub fn _mm_mask_fmaddsub_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps -pub fn _mm_maskz_fmaddsub_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = 
"149298")] +pub const fn _mm_maskz_fmaddsub_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { simd_select_bitmask(k, _mm_fmaddsub_ps(a, b, c), _mm_setzero_ps()) } } @@ -4031,7 +4305,8 @@ pub fn _mm_maskz_fmaddsub_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps -pub fn _mm_mask3_fmaddsub_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fmaddsub_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { unsafe { simd_select_bitmask(k, _mm_fmaddsub_ps(a, b, c), c) } } @@ -4042,7 +4317,8 @@ pub fn _mm_mask3_fmaddsub_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd -pub fn _mm512_fmaddsub_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_fmaddsub_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { unsafe { let add = simd_fma(a, b, c); let sub = simd_fma(a, b, simd_neg(c)); @@ -4057,7 +4333,8 @@ pub fn _mm512_fmaddsub_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd -pub fn _mm512_mask_fmaddsub_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_fmaddsub_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d { unsafe { simd_select_bitmask(k, _mm512_fmaddsub_pd(a, b, c), a) } } @@ -4068,7 +4345,8 @@ pub fn _mm512_mask_fmaddsub_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd -pub fn _mm512_maskz_fmaddsub_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_fmaddsub_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> __m512d { unsafe { simd_select_bitmask(k, _mm512_fmaddsub_pd(a, b, c), _mm512_setzero_pd()) } } @@ -4079,7 +4357,8 @@ pub fn _mm512_maskz_fmaddsub_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd -pub fn _mm512_mask3_fmaddsub_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask3_fmaddsub_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d { unsafe { simd_select_bitmask(k, _mm512_fmaddsub_pd(a, b, c), c) } } @@ -4090,7 +4369,8 @@ pub fn _mm512_mask3_fmaddsub_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, 
assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd -pub fn _mm256_mask_fmaddsub_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_fmaddsub_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> __m256d { unsafe { simd_select_bitmask(k, _mm256_fmaddsub_pd(a, b, c), a) } } @@ -4101,7 +4381,8 @@ pub fn _mm256_mask_fmaddsub_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd -pub fn _mm256_maskz_fmaddsub_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_fmaddsub_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> __m256d { unsafe { simd_select_bitmask(k, _mm256_fmaddsub_pd(a, b, c), _mm256_setzero_pd()) } } @@ -4112,7 +4393,8 @@ pub fn _mm256_maskz_fmaddsub_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd -pub fn _mm256_mask3_fmaddsub_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask3_fmaddsub_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> __m256d { unsafe { simd_select_bitmask(k, _mm256_fmaddsub_pd(a, b, c), c) } } @@ -4123,7 +4405,8 @@ pub fn _mm256_mask3_fmaddsub_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd -pub fn _mm_mask_fmaddsub_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fmaddsub_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_select_bitmask(k, _mm_fmaddsub_pd(a, b, c), a) } } @@ -4134,7 +4417,8 @@ pub fn _mm_mask_fmaddsub_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd -pub fn _mm_maskz_fmaddsub_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fmaddsub_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_select_bitmask(k, _mm_fmaddsub_pd(a, b, c), _mm_setzero_pd()) } } @@ -4145,7 +4429,8 @@ pub fn _mm_maskz_fmaddsub_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd -pub fn _mm_mask3_fmaddsub_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fmaddsub_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> 
__m128d { unsafe { simd_select_bitmask(k, _mm_fmaddsub_pd(a, b, c), c) } } @@ -4156,7 +4441,8 @@ pub fn _mm_mask3_fmaddsub_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps -pub fn _mm512_fmsubadd_ps(a: __m512, b: __m512, c: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_fmsubadd_ps(a: __m512, b: __m512, c: __m512) -> __m512 { unsafe { let add = simd_fma(a, b, c); let sub = simd_fma(a, b, simd_neg(c)); @@ -4175,7 +4461,8 @@ pub fn _mm512_fmsubadd_ps(a: __m512, b: __m512, c: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps -pub fn _mm512_mask_fmsubadd_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_fmsubadd_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __m512 { unsafe { simd_select_bitmask(k, _mm512_fmsubadd_ps(a, b, c), a) } } @@ -4186,7 +4473,8 @@ pub fn _mm512_mask_fmsubadd_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps -pub fn _mm512_maskz_fmsubadd_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_fmsubadd_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> __m512 { unsafe { simd_select_bitmask(k, _mm512_fmsubadd_ps(a, b, c), _mm512_setzero_ps()) } } @@ -4197,7 +4485,8 @@ pub fn _mm512_maskz_fmsubadd_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) - #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps -pub fn _mm512_mask3_fmsubadd_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask3_fmsubadd_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> __m512 { unsafe { simd_select_bitmask(k, _mm512_fmsubadd_ps(a, b, c), c) } } @@ -4208,7 +4497,8 @@ pub fn _mm512_mask3_fmsubadd_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) - #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps -pub fn _mm256_mask_fmsubadd_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_fmsubadd_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m256 { unsafe { simd_select_bitmask(k, _mm256_fmsubadd_ps(a, b, c), a) } } @@ -4219,7 +4509,8 @@ pub fn _mm256_mask_fmsubadd_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps -pub fn _mm256_maskz_fmsubadd_ps(k: __mmask8, a: __m256, b: 
__m256, c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_fmsubadd_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> __m256 { unsafe { simd_select_bitmask(k, _mm256_fmsubadd_ps(a, b, c), _mm256_setzero_ps()) } } @@ -4230,7 +4521,8 @@ pub fn _mm256_maskz_fmsubadd_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps -pub fn _mm256_mask3_fmsubadd_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask3_fmsubadd_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256 { unsafe { simd_select_bitmask(k, _mm256_fmsubadd_ps(a, b, c), c) } } @@ -4241,7 +4533,8 @@ pub fn _mm256_mask3_fmsubadd_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps -pub fn _mm_mask_fmsubadd_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fmsubadd_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { unsafe { simd_select_bitmask(k, _mm_fmsubadd_ps(a, b, c), a) } } @@ -4252,7 +4545,8 @@ pub fn _mm_mask_fmsubadd_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps -pub fn _mm_maskz_fmsubadd_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fmsubadd_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { simd_select_bitmask(k, _mm_fmsubadd_ps(a, b, c), _mm_setzero_ps()) } } @@ -4263,7 +4557,8 @@ pub fn _mm_maskz_fmsubadd_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps -pub fn _mm_mask3_fmsubadd_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fmsubadd_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { unsafe { simd_select_bitmask(k, _mm_fmsubadd_ps(a, b, c), c) } } @@ -4274,7 +4569,8 @@ pub fn _mm_mask3_fmsubadd_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd -pub fn _mm512_fmsubadd_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_fmsubadd_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { unsafe { let add = simd_fma(a, b, c); let sub = simd_fma(a, b, simd_neg(c)); @@ -4289,7 +4585,8 @@ pub fn _mm512_fmsubadd_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] 
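As a rough reference for the vfmaddsub/vfmsubadd conversions in this stretch of the diff, a hedged scalar sketch (not the intrinsic implementation; the slice-based f64 form here is only illustrative): fmaddsub subtracts c on even-indexed lanes and adds it on odd-indexed lanes, while fmsubadd does the opposite, each lane as a single fused multiply-add.

// Scalar model of the alternating fused lanes; `mul_add` stands in for the
// per-lane `simd_fma` used by the real intrinsics above.
fn fmaddsub_scalar(a: &[f64], b: &[f64], c: &[f64], out: &mut [f64]) {
    for i in 0..out.len() {
        // even lane: a*b - c, odd lane: a*b + c
        out[i] = a[i].mul_add(b[i], if i % 2 == 0 { -c[i] } else { c[i] });
    }
}

fn fmsubadd_scalar(a: &[f64], b: &[f64], c: &[f64], out: &mut [f64]) {
    for i in 0..out.len() {
        // even lane: a*b + c, odd lane: a*b - c
        out[i] = a[i].mul_add(b[i], if i % 2 == 0 { c[i] } else { -c[i] });
    }
}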
#[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd -pub fn _mm512_mask_fmsubadd_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_fmsubadd_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d { unsafe { simd_select_bitmask(k, _mm512_fmsubadd_pd(a, b, c), a) } } @@ -4300,7 +4597,8 @@ pub fn _mm512_mask_fmsubadd_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd -pub fn _mm512_maskz_fmsubadd_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_fmsubadd_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> __m512d { unsafe { simd_select_bitmask(k, _mm512_fmsubadd_pd(a, b, c), _mm512_setzero_pd()) } } @@ -4311,7 +4609,8 @@ pub fn _mm512_maskz_fmsubadd_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd -pub fn _mm512_mask3_fmsubadd_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask3_fmsubadd_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d { unsafe { simd_select_bitmask(k, _mm512_fmsubadd_pd(a, b, c), c) } } @@ -4322,7 +4621,8 @@ pub fn _mm512_mask3_fmsubadd_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd -pub fn _mm256_mask_fmsubadd_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_fmsubadd_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> __m256d { unsafe { simd_select_bitmask(k, _mm256_fmsubadd_pd(a, b, c), a) } } @@ -4333,7 +4633,8 @@ pub fn _mm256_mask_fmsubadd_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd -pub fn _mm256_maskz_fmsubadd_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_fmsubadd_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> __m256d { unsafe { simd_select_bitmask(k, _mm256_fmsubadd_pd(a, b, c), _mm256_setzero_pd()) } } @@ -4344,7 +4645,8 @@ pub fn _mm256_maskz_fmsubadd_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd -pub fn _mm256_mask3_fmsubadd_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub 
const fn _mm256_mask3_fmsubadd_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> __m256d { unsafe { simd_select_bitmask(k, _mm256_fmsubadd_pd(a, b, c), c) } } @@ -4355,7 +4657,8 @@ pub fn _mm256_mask3_fmsubadd_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd -pub fn _mm_mask_fmsubadd_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fmsubadd_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_select_bitmask(k, _mm_fmsubadd_pd(a, b, c), a) } } @@ -4366,7 +4669,8 @@ pub fn _mm_mask_fmsubadd_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd -pub fn _mm_maskz_fmsubadd_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fmsubadd_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_select_bitmask(k, _mm_fmsubadd_pd(a, b, c), _mm_setzero_pd()) } } @@ -4377,7 +4681,8 @@ pub fn _mm_maskz_fmsubadd_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd -pub fn _mm_mask3_fmsubadd_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fmsubadd_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { unsafe { simd_select_bitmask(k, _mm_fmsubadd_pd(a, b, c), c) } } @@ -4388,7 +4693,8 @@ pub fn _mm_mask3_fmsubadd_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps -pub fn _mm512_fnmadd_ps(a: __m512, b: __m512, c: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_fnmadd_ps(a: __m512, b: __m512, c: __m512) -> __m512 { unsafe { simd_fma(simd_neg(a), b, c) } } @@ -4399,7 +4705,8 @@ pub fn _mm512_fnmadd_ps(a: __m512, b: __m512, c: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps -pub fn _mm512_mask_fnmadd_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_fnmadd_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __m512 { unsafe { simd_select_bitmask(k, _mm512_fnmadd_ps(a, b, c), a) } } @@ -4410,7 +4717,8 @@ pub fn _mm512_mask_fnmadd_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> _ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps -pub fn _mm512_maskz_fnmadd_ps(k: __mmask16, 
a: __m512, b: __m512, c: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_fnmadd_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> __m512 { unsafe { simd_select_bitmask(k, _mm512_fnmadd_ps(a, b, c), _mm512_setzero_ps()) } } @@ -4421,7 +4729,8 @@ pub fn _mm512_maskz_fnmadd_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps -pub fn _mm512_mask3_fnmadd_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask3_fnmadd_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> __m512 { unsafe { simd_select_bitmask(k, _mm512_fnmadd_ps(a, b, c), c) } } @@ -4432,7 +4741,8 @@ pub fn _mm512_mask3_fnmadd_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps -pub fn _mm256_mask_fnmadd_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_fnmadd_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m256 { unsafe { simd_select_bitmask(k, _mm256_fnmadd_ps(a, b, c), a) } } @@ -4443,7 +4753,8 @@ pub fn _mm256_mask_fnmadd_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps -pub fn _mm256_maskz_fnmadd_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_fnmadd_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> __m256 { unsafe { simd_select_bitmask(k, _mm256_fnmadd_ps(a, b, c), _mm256_setzero_ps()) } } @@ -4454,7 +4765,8 @@ pub fn _mm256_maskz_fnmadd_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps -pub fn _mm256_mask3_fnmadd_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask3_fnmadd_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256 { unsafe { simd_select_bitmask(k, _mm256_fnmadd_ps(a, b, c), c) } } @@ -4465,7 +4777,8 @@ pub fn _mm256_mask3_fnmadd_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps -pub fn _mm_mask_fnmadd_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fnmadd_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { unsafe { simd_select_bitmask(k, _mm_fnmadd_ps(a, b, c), a) } } @@ -4476,7 +4789,8 @@ pub fn _mm_mask_fnmadd_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m12 #[target_feature(enable = "avx512f,avx512vl")] 
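For the vfnmadd conversions here (and the vfnmsub ones further below), a minimal per-lane sketch of what the simd_fma(simd_neg(a), b, c) bodies compute, assuming nothing beyond scalar f32 arithmetic:

// Hedged scalar sketch: one lane of fnmadd/fnmsub as a single fused operation.
fn fnmadd_lane(a: f32, b: f32, c: f32) -> f32 {
    // -(a * b) + c, matching simd_fma(simd_neg(a), b, c)
    (-a).mul_add(b, c)
}

fn fnmsub_lane(a: f32, b: f32, c: f32) -> f32 {
    // -(a * b) - c, matching simd_fma(simd_neg(a), b, simd_neg(c))
    (-a).mul_add(b, -c)
}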
#[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps -pub fn _mm_maskz_fnmadd_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fnmadd_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { simd_select_bitmask(k, _mm_fnmadd_ps(a, b, c), _mm_setzero_ps()) } } @@ -4487,7 +4801,8 @@ pub fn _mm_maskz_fnmadd_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m1 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps -pub fn _mm_mask3_fnmadd_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fnmadd_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { unsafe { simd_select_bitmask(k, _mm_fnmadd_ps(a, b, c), c) } } @@ -4498,7 +4813,8 @@ pub fn _mm_mask3_fnmadd_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m1 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd -pub fn _mm512_fnmadd_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_fnmadd_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { unsafe { simd_fma(simd_neg(a), b, c) } } @@ -4509,7 +4825,8 @@ pub fn _mm512_fnmadd_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd -pub fn _mm512_mask_fnmadd_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_fnmadd_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d { unsafe { simd_select_bitmask(k, _mm512_fnmadd_pd(a, b, c), a) } } @@ -4520,7 +4837,8 @@ pub fn _mm512_mask_fnmadd_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd -pub fn _mm512_maskz_fnmadd_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_fnmadd_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> __m512d { unsafe { simd_select_bitmask(k, _mm512_fnmadd_pd(a, b, c), _mm512_setzero_pd()) } } @@ -4531,7 +4849,8 @@ pub fn _mm512_maskz_fnmadd_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) - #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd -pub fn _mm512_mask3_fnmadd_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask3_fnmadd_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d { unsafe { simd_select_bitmask(k, _mm512_fnmadd_pd(a, b, c), c) } } @@ -4542,7 +4861,8 @@ pub fn 
_mm512_mask3_fnmadd_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) - #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd -pub fn _mm256_mask_fnmadd_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_fnmadd_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> __m256d { unsafe { simd_select_bitmask(k, _mm256_fnmadd_pd(a, b, c), a) } } @@ -4553,7 +4873,8 @@ pub fn _mm256_mask_fnmadd_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd -pub fn _mm256_maskz_fnmadd_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_fnmadd_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> __m256d { unsafe { simd_select_bitmask(k, _mm256_fnmadd_pd(a, b, c), _mm256_setzero_pd()) } } @@ -4564,7 +4885,8 @@ pub fn _mm256_maskz_fnmadd_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) - #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd -pub fn _mm256_mask3_fnmadd_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask3_fnmadd_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> __m256d { unsafe { simd_select_bitmask(k, _mm256_fnmadd_pd(a, b, c), c) } } @@ -4575,7 +4897,8 @@ pub fn _mm256_mask3_fnmadd_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) - #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd -pub fn _mm_mask_fnmadd_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fnmadd_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_select_bitmask(k, _mm_fnmadd_pd(a, b, c), a) } } @@ -4586,7 +4909,8 @@ pub fn _mm_mask_fnmadd_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd -pub fn _mm_maskz_fnmadd_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fnmadd_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_select_bitmask(k, _mm_fnmadd_pd(a, b, c), _mm_setzero_pd()) } } @@ -4597,7 +4921,8 @@ pub fn _mm_maskz_fnmadd_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd -pub fn _mm_mask3_fnmadd_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { +#[rustc_const_unstable(feature = 
"stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fnmadd_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { unsafe { simd_select_bitmask(k, _mm_fnmadd_pd(a, b, c), c) } } @@ -4608,7 +4933,8 @@ pub fn _mm_mask3_fnmadd_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> _ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps -pub fn _mm512_fnmsub_ps(a: __m512, b: __m512, c: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_fnmsub_ps(a: __m512, b: __m512, c: __m512) -> __m512 { unsafe { simd_fma(simd_neg(a), b, simd_neg(c)) } } @@ -4619,7 +4945,8 @@ pub fn _mm512_fnmsub_ps(a: __m512, b: __m512, c: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps -pub fn _mm512_mask_fnmsub_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_fnmsub_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __m512 { unsafe { simd_select_bitmask(k, _mm512_fnmsub_ps(a, b, c), a) } } @@ -4630,7 +4957,8 @@ pub fn _mm512_mask_fnmsub_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> _ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps -pub fn _mm512_maskz_fnmsub_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_fnmsub_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> __m512 { unsafe { simd_select_bitmask(k, _mm512_fnmsub_ps(a, b, c), _mm512_setzero_ps()) } } @@ -4641,7 +4969,8 @@ pub fn _mm512_maskz_fnmsub_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps -pub fn _mm512_mask3_fnmsub_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask3_fnmsub_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> __m512 { unsafe { simd_select_bitmask(k, _mm512_fnmsub_ps(a, b, c), c) } } @@ -4652,7 +4981,8 @@ pub fn _mm512_mask3_fnmsub_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps -pub fn _mm256_mask_fnmsub_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_fnmsub_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m256 { unsafe { simd_select_bitmask(k, _mm256_fnmsub_ps(a, b, c), a) } } @@ -4663,7 +4993,8 @@ pub fn _mm256_mask_fnmsub_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps -pub fn _mm256_maskz_fnmsub_ps(k: 
__mmask8, a: __m256, b: __m256, c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_fnmsub_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> __m256 { unsafe { simd_select_bitmask(k, _mm256_fnmsub_ps(a, b, c), _mm256_setzero_ps()) } } @@ -4674,7 +5005,8 @@ pub fn _mm256_maskz_fnmsub_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps -pub fn _mm256_mask3_fnmsub_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask3_fnmsub_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256 { unsafe { simd_select_bitmask(k, _mm256_fnmsub_ps(a, b, c), c) } } @@ -4685,7 +5017,8 @@ pub fn _mm256_mask3_fnmsub_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps -pub fn _mm_mask_fnmsub_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fnmsub_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { unsafe { simd_select_bitmask(k, _mm_fnmsub_ps(a, b, c), a) } } @@ -4696,7 +5029,8 @@ pub fn _mm_mask_fnmsub_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m12 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps -pub fn _mm_maskz_fnmsub_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fnmsub_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { simd_select_bitmask(k, _mm_fnmsub_ps(a, b, c), _mm_setzero_ps()) } } @@ -4707,7 +5041,8 @@ pub fn _mm_maskz_fnmsub_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m1 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps -pub fn _mm_mask3_fnmsub_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fnmsub_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { unsafe { simd_select_bitmask(k, _mm_fnmsub_ps(a, b, c), c) } } @@ -4718,7 +5053,8 @@ pub fn _mm_mask3_fnmsub_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m1 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd -pub fn _mm512_fnmsub_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_fnmsub_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { unsafe { simd_fma(simd_neg(a), b, simd_neg(c)) } } @@ -4729,7 +5065,8 @@ pub fn _mm512_fnmsub_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, 
assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd -pub fn _mm512_mask_fnmsub_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_fnmsub_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d { unsafe { simd_select_bitmask(k, _mm512_fnmsub_pd(a, b, c), a) } } @@ -4740,7 +5077,8 @@ pub fn _mm512_mask_fnmsub_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd -pub fn _mm512_maskz_fnmsub_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_fnmsub_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> __m512d { unsafe { simd_select_bitmask(k, _mm512_fnmsub_pd(a, b, c), _mm512_setzero_pd()) } } @@ -4751,7 +5089,8 @@ pub fn _mm512_maskz_fnmsub_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) - #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd -pub fn _mm512_mask3_fnmsub_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask3_fnmsub_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d { unsafe { simd_select_bitmask(k, _mm512_fnmsub_pd(a, b, c), c) } } @@ -4762,7 +5101,8 @@ pub fn _mm512_mask3_fnmsub_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) - #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd -pub fn _mm256_mask_fnmsub_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_fnmsub_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> __m256d { unsafe { simd_select_bitmask(k, _mm256_fnmsub_pd(a, b, c), a) } } @@ -4773,7 +5113,8 @@ pub fn _mm256_mask_fnmsub_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd -pub fn _mm256_maskz_fnmsub_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_fnmsub_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> __m256d { unsafe { simd_select_bitmask(k, _mm256_fnmsub_pd(a, b, c), _mm256_setzero_pd()) } } @@ -4784,7 +5125,8 @@ pub fn _mm256_maskz_fnmsub_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) - #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd -pub fn _mm256_mask3_fnmsub_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask3_fnmsub_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> __m256d { unsafe { simd_select_bitmask(k, _mm256_fnmsub_pd(a, b, c), c) } 
} @@ -4795,7 +5137,8 @@ pub fn _mm256_mask3_fnmsub_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) - #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd -pub fn _mm_mask_fnmsub_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fnmsub_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_select_bitmask(k, _mm_fnmsub_pd(a, b, c), a) } } @@ -4806,7 +5149,8 @@ pub fn _mm_mask_fnmsub_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd -pub fn _mm_maskz_fnmsub_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fnmsub_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_select_bitmask(k, _mm_fnmsub_pd(a, b, c), _mm_setzero_pd()) } } @@ -4817,7 +5161,8 @@ pub fn _mm_maskz_fnmsub_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd -pub fn _mm_mask3_fnmsub_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fnmsub_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { unsafe { simd_select_bitmask(k, _mm_fnmsub_pd(a, b, c), c) } } @@ -11446,7 +11791,8 @@ pub fn _mm512_mask_cvtpd_pslo(src: __m512, k: __mmask8, v2: __m512d) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbd))] -pub fn _mm512_cvtepi8_epi32(a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepi8_epi32(a: __m128i) -> __m512i { unsafe { let a = a.as_i8x16(); transmute::(simd_cast(a)) @@ -11460,7 +11806,8 @@ pub fn _mm512_cvtepi8_epi32(a: __m128i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbd))] -pub fn _mm512_mask_cvtepi8_epi32(src: __m512i, k: __mmask16, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepi8_epi32(src: __m512i, k: __mmask16, a: __m128i) -> __m512i { unsafe { let convert = _mm512_cvtepi8_epi32(a).as_i32x16(); transmute(simd_select_bitmask(k, convert, src.as_i32x16())) @@ -11474,7 +11821,8 @@ pub fn _mm512_mask_cvtepi8_epi32(src: __m512i, k: __mmask16, a: __m128i) -> __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbd))] -pub fn _mm512_maskz_cvtepi8_epi32(k: __mmask16, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepi8_epi32(k: __mmask16, a: __m128i) -> __m512i { unsafe { let convert = _mm512_cvtepi8_epi32(a).as_i32x16(); transmute(simd_select_bitmask(k, convert, i32x16::ZERO)) @@ -11488,7 
+11836,8 @@ pub fn _mm512_maskz_cvtepi8_epi32(k: __mmask16, a: __m128i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbd))] -pub fn _mm256_mask_cvtepi8_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cvtepi8_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepi8_epi32(a).as_i32x8(); transmute(simd_select_bitmask(k, convert, src.as_i32x8())) @@ -11502,7 +11851,8 @@ pub fn _mm256_mask_cvtepi8_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbd))] -pub fn _mm256_maskz_cvtepi8_epi32(k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_cvtepi8_epi32(k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepi8_epi32(a).as_i32x8(); transmute(simd_select_bitmask(k, convert, i32x8::ZERO)) @@ -11516,7 +11866,8 @@ pub fn _mm256_maskz_cvtepi8_epi32(k: __mmask8, a: __m128i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbd))] -pub fn _mm_mask_cvtepi8_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cvtepi8_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepi8_epi32(a).as_i32x4(); transmute(simd_select_bitmask(k, convert, src.as_i32x4())) @@ -11530,7 +11881,8 @@ pub fn _mm_mask_cvtepi8_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbd))] -pub fn _mm_maskz_cvtepi8_epi32(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_cvtepi8_epi32(k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepi8_epi32(a).as_i32x4(); transmute(simd_select_bitmask(k, convert, i32x4::ZERO)) @@ -11544,7 +11896,8 @@ pub fn _mm_maskz_cvtepi8_epi32(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbq))] -pub fn _mm512_cvtepi8_epi64(a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepi8_epi64(a: __m128i) -> __m512i { unsafe { let a = a.as_i8x16(); let v64: i8x8 = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); @@ -11559,7 +11912,8 @@ pub fn _mm512_cvtepi8_epi64(a: __m128i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbq))] -pub fn _mm512_mask_cvtepi8_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepi8_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __m512i { unsafe { let convert = _mm512_cvtepi8_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, convert, src.as_i64x8())) @@ -11573,7 +11927,8 @@ pub fn _mm512_mask_cvtepi8_epi64(src: 
__m512i, k: __mmask8, a: __m128i) -> __m51 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbq))] -pub fn _mm512_maskz_cvtepi8_epi64(k: __mmask8, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepi8_epi64(k: __mmask8, a: __m128i) -> __m512i { unsafe { let convert = _mm512_cvtepi8_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, convert, i64x8::ZERO)) @@ -11587,7 +11942,8 @@ pub fn _mm512_maskz_cvtepi8_epi64(k: __mmask8, a: __m128i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbq))] -pub fn _mm256_mask_cvtepi8_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cvtepi8_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepi8_epi64(a).as_i64x4(); transmute(simd_select_bitmask(k, convert, src.as_i64x4())) @@ -11601,7 +11957,8 @@ pub fn _mm256_mask_cvtepi8_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbq))] -pub fn _mm256_maskz_cvtepi8_epi64(k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_cvtepi8_epi64(k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepi8_epi64(a).as_i64x4(); transmute(simd_select_bitmask(k, convert, i64x4::ZERO)) @@ -11615,7 +11972,8 @@ pub fn _mm256_maskz_cvtepi8_epi64(k: __mmask8, a: __m128i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbq))] -pub fn _mm_mask_cvtepi8_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cvtepi8_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepi8_epi64(a).as_i64x2(); transmute(simd_select_bitmask(k, convert, src.as_i64x2())) @@ -11629,7 +11987,8 @@ pub fn _mm_mask_cvtepi8_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxbq))] -pub fn _mm_maskz_cvtepi8_epi64(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_cvtepi8_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepi8_epi64(a).as_i64x2(); transmute(simd_select_bitmask(k, convert, i64x2::ZERO)) @@ -11643,7 +12002,8 @@ pub fn _mm_maskz_cvtepi8_epi64(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbd))] -pub fn _mm512_cvtepu8_epi32(a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepu8_epi32(a: __m128i) -> __m512i { unsafe { let a = a.as_u8x16(); transmute::(simd_cast(a)) @@ -11657,7 +12017,8 @@ pub fn _mm512_cvtepu8_epi32(a: __m128i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", 
since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbd))] -pub fn _mm512_mask_cvtepu8_epi32(src: __m512i, k: __mmask16, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepu8_epi32(src: __m512i, k: __mmask16, a: __m128i) -> __m512i { unsafe { let convert = _mm512_cvtepu8_epi32(a).as_i32x16(); transmute(simd_select_bitmask(k, convert, src.as_i32x16())) @@ -11671,7 +12032,8 @@ pub fn _mm512_mask_cvtepu8_epi32(src: __m512i, k: __mmask16, a: __m128i) -> __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbd))] -pub fn _mm512_maskz_cvtepu8_epi32(k: __mmask16, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepu8_epi32(k: __mmask16, a: __m128i) -> __m512i { unsafe { let convert = _mm512_cvtepu8_epi32(a).as_i32x16(); transmute(simd_select_bitmask(k, convert, i32x16::ZERO)) @@ -11685,7 +12047,8 @@ pub fn _mm512_maskz_cvtepu8_epi32(k: __mmask16, a: __m128i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbd))] -pub fn _mm256_mask_cvtepu8_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cvtepu8_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepu8_epi32(a).as_i32x8(); transmute(simd_select_bitmask(k, convert, src.as_i32x8())) @@ -11699,7 +12062,8 @@ pub fn _mm256_mask_cvtepu8_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbd))] -pub fn _mm256_maskz_cvtepu8_epi32(k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_cvtepu8_epi32(k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepu8_epi32(a).as_i32x8(); transmute(simd_select_bitmask(k, convert, i32x8::ZERO)) @@ -11713,7 +12077,8 @@ pub fn _mm256_maskz_cvtepu8_epi32(k: __mmask8, a: __m128i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbd))] -pub fn _mm_mask_cvtepu8_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cvtepu8_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepu8_epi32(a).as_i32x4(); transmute(simd_select_bitmask(k, convert, src.as_i32x4())) @@ -11727,7 +12092,8 @@ pub fn _mm_mask_cvtepu8_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbd))] -pub fn _mm_maskz_cvtepu8_epi32(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_cvtepu8_epi32(k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepu8_epi32(a).as_i32x4(); transmute(simd_select_bitmask(k, convert, i32x4::ZERO)) @@ -11741,7 +12107,8 @@ pub fn _mm_maskz_cvtepu8_epi32(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] 
#[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbq))] -pub fn _mm512_cvtepu8_epi64(a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepu8_epi64(a: __m128i) -> __m512i { unsafe { let a = a.as_u8x16(); let v64: u8x8 = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); @@ -11756,7 +12123,8 @@ pub fn _mm512_cvtepu8_epi64(a: __m128i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbq))] -pub fn _mm512_mask_cvtepu8_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepu8_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __m512i { unsafe { let convert = _mm512_cvtepu8_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, convert, src.as_i64x8())) @@ -11770,7 +12138,8 @@ pub fn _mm512_mask_cvtepu8_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __m51 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbq))] -pub fn _mm512_maskz_cvtepu8_epi64(k: __mmask8, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepu8_epi64(k: __mmask8, a: __m128i) -> __m512i { unsafe { let convert = _mm512_cvtepu8_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, convert, i64x8::ZERO)) @@ -11784,7 +12153,8 @@ pub fn _mm512_maskz_cvtepu8_epi64(k: __mmask8, a: __m128i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbq))] -pub fn _mm256_mask_cvtepu8_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cvtepu8_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepu8_epi64(a).as_i64x4(); transmute(simd_select_bitmask(k, convert, src.as_i64x4())) @@ -11798,7 +12168,8 @@ pub fn _mm256_mask_cvtepu8_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbq))] -pub fn _mm256_maskz_cvtepu8_epi64(k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_cvtepu8_epi64(k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepu8_epi64(a).as_i64x4(); transmute(simd_select_bitmask(k, convert, i64x4::ZERO)) @@ -11812,7 +12183,8 @@ pub fn _mm256_maskz_cvtepu8_epi64(k: __mmask8, a: __m128i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbq))] -pub fn _mm_mask_cvtepu8_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cvtepu8_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepu8_epi64(a).as_i64x2(); transmute(simd_select_bitmask(k, convert, src.as_i64x2())) @@ -11826,7 +12198,8 @@ pub fn _mm_mask_cvtepu8_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = 
"stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxbq))] -pub fn _mm_maskz_cvtepu8_epi64(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_cvtepu8_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepu8_epi64(a).as_i64x2(); transmute(simd_select_bitmask(k, convert, i64x2::ZERO)) @@ -11840,7 +12213,8 @@ pub fn _mm_maskz_cvtepu8_epi64(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxwd))] -pub fn _mm512_cvtepi16_epi32(a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepi16_epi32(a: __m256i) -> __m512i { unsafe { let a = a.as_i16x16(); transmute::(simd_cast(a)) @@ -11854,7 +12228,8 @@ pub fn _mm512_cvtepi16_epi32(a: __m256i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxwd))] -pub fn _mm512_mask_cvtepi16_epi32(src: __m512i, k: __mmask16, a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepi16_epi32(src: __m512i, k: __mmask16, a: __m256i) -> __m512i { unsafe { let convert = _mm512_cvtepi16_epi32(a).as_i32x16(); transmute(simd_select_bitmask(k, convert, src.as_i32x16())) @@ -11868,7 +12243,8 @@ pub fn _mm512_mask_cvtepi16_epi32(src: __m512i, k: __mmask16, a: __m256i) -> __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxwd))] -pub fn _mm512_maskz_cvtepi16_epi32(k: __mmask16, a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepi16_epi32(k: __mmask16, a: __m256i) -> __m512i { unsafe { let convert = _mm512_cvtepi16_epi32(a).as_i32x16(); transmute(simd_select_bitmask(k, convert, i32x16::ZERO)) @@ -11882,7 +12258,8 @@ pub fn _mm512_maskz_cvtepi16_epi32(k: __mmask16, a: __m256i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxwd))] -pub fn _mm256_mask_cvtepi16_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cvtepi16_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepi16_epi32(a).as_i32x8(); transmute(simd_select_bitmask(k, convert, src.as_i32x8())) @@ -11896,7 +12273,8 @@ pub fn _mm256_mask_cvtepi16_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxwd))] -pub fn _mm256_maskz_cvtepi16_epi32(k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_cvtepi16_epi32(k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepi16_epi32(a).as_i32x8(); transmute(simd_select_bitmask(k, convert, i32x8::ZERO)) @@ -11910,7 +12288,8 @@ pub fn _mm256_maskz_cvtepi16_epi32(k: __mmask8, a: __m128i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxwd))] -pub fn 
_mm_mask_cvtepi16_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cvtepi16_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepi16_epi32(a).as_i32x4(); transmute(simd_select_bitmask(k, convert, src.as_i32x4())) @@ -11924,7 +12303,8 @@ pub fn _mm_mask_cvtepi16_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxwd))] -pub fn _mm_maskz_cvtepi16_epi32(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_cvtepi16_epi32(k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepi16_epi32(a).as_i32x4(); transmute(simd_select_bitmask(k, convert, i32x4::ZERO)) @@ -11938,7 +12318,8 @@ pub fn _mm_maskz_cvtepi16_epi32(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxwq))] -pub fn _mm512_cvtepi16_epi64(a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepi16_epi64(a: __m128i) -> __m512i { unsafe { let a = a.as_i16x8(); transmute::(simd_cast(a)) @@ -11952,7 +12333,8 @@ pub fn _mm512_cvtepi16_epi64(a: __m128i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxwq))] -pub fn _mm512_mask_cvtepi16_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepi16_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __m512i { unsafe { let convert = _mm512_cvtepi16_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, convert, src.as_i64x8())) @@ -11966,7 +12348,8 @@ pub fn _mm512_mask_cvtepi16_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxwq))] -pub fn _mm512_maskz_cvtepi16_epi64(k: __mmask8, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepi16_epi64(k: __mmask8, a: __m128i) -> __m512i { unsafe { let convert = _mm512_cvtepi16_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, convert, i64x8::ZERO)) @@ -11980,7 +12363,8 @@ pub fn _mm512_maskz_cvtepi16_epi64(k: __mmask8, a: __m128i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxwq))] -pub fn _mm256_mask_cvtepi16_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cvtepi16_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepi16_epi64(a).as_i64x4(); transmute(simd_select_bitmask(k, convert, src.as_i64x4())) @@ -11994,7 +12378,8 @@ pub fn _mm256_mask_cvtepi16_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxwq))] -pub fn _mm256_maskz_cvtepi16_epi64(k: __mmask8, a: __m128i) -> __m256i { 
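Likewise for the vpmovsxwq family around here, a small hedged sketch of the sign-extending counterpart (scalar only, with the 4-lane width of the 256-bit form chosen for illustration): `as i64` on an i16 lane performs the same sign extension that the simd_cast in the unmasked intrinsic does, and the mask handling mirrors the zero-extension sketch above.

// Scalar model of _mm256_maskz_cvtepi16_epi64-style behaviour: sign-extend each
// lane, keeping it only where the corresponding mask bit is set.
fn maskz_cvtepi16_epi64_scalar(k: u8, a: &[i16; 4]) -> [i64; 4] {
    let mut out = [0i64; 4];
    for i in 0..4 {
        if (k >> i) & 1 != 0 {
            out[i] = a[i] as i64; // sign extension
        }
    }
    out
}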
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_cvtepi16_epi64(k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepi16_epi64(a).as_i64x4(); transmute(simd_select_bitmask(k, convert, i64x4::ZERO)) @@ -12008,7 +12393,8 @@ pub fn _mm256_maskz_cvtepi16_epi64(k: __mmask8, a: __m128i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxwq))] -pub fn _mm_mask_cvtepi16_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cvtepi16_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepi16_epi64(a).as_i64x2(); transmute(simd_select_bitmask(k, convert, src.as_i64x2())) @@ -12022,7 +12408,8 @@ pub fn _mm_mask_cvtepi16_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxwq))] -pub fn _mm_maskz_cvtepi16_epi64(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_cvtepi16_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepi16_epi64(a).as_i64x2(); transmute(simd_select_bitmask(k, convert, i64x2::ZERO)) @@ -12036,7 +12423,8 @@ pub fn _mm_maskz_cvtepi16_epi64(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxwd))] -pub fn _mm512_cvtepu16_epi32(a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepu16_epi32(a: __m256i) -> __m512i { unsafe { let a = a.as_u16x16(); transmute::(simd_cast(a)) @@ -12050,7 +12438,8 @@ pub fn _mm512_cvtepu16_epi32(a: __m256i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxwd))] -pub fn _mm512_mask_cvtepu16_epi32(src: __m512i, k: __mmask16, a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepu16_epi32(src: __m512i, k: __mmask16, a: __m256i) -> __m512i { unsafe { let convert = _mm512_cvtepu16_epi32(a).as_i32x16(); transmute(simd_select_bitmask(k, convert, src.as_i32x16())) @@ -12064,7 +12453,8 @@ pub fn _mm512_mask_cvtepu16_epi32(src: __m512i, k: __mmask16, a: __m256i) -> __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxwd))] -pub fn _mm512_maskz_cvtepu16_epi32(k: __mmask16, a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepu16_epi32(k: __mmask16, a: __m256i) -> __m512i { unsafe { let convert = _mm512_cvtepu16_epi32(a).as_i32x16(); transmute(simd_select_bitmask(k, convert, i32x16::ZERO)) @@ -12078,7 +12468,8 @@ pub fn _mm512_maskz_cvtepu16_epi32(k: __mmask16, a: __m256i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxwd))] -pub fn _mm256_mask_cvtepu16_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn 
_mm256_mask_cvtepu16_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepu16_epi32(a).as_i32x8(); transmute(simd_select_bitmask(k, convert, src.as_i32x8())) @@ -12092,7 +12483,8 @@ pub fn _mm256_mask_cvtepu16_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxwd))] -pub fn _mm256_maskz_cvtepu16_epi32(k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_cvtepu16_epi32(k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepu16_epi32(a).as_i32x8(); transmute(simd_select_bitmask(k, convert, i32x8::ZERO)) @@ -12106,7 +12498,8 @@ pub fn _mm256_maskz_cvtepu16_epi32(k: __mmask8, a: __m128i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxwd))] -pub fn _mm_mask_cvtepu16_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cvtepu16_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepu16_epi32(a).as_i32x4(); transmute(simd_select_bitmask(k, convert, src.as_i32x4())) @@ -12120,7 +12513,8 @@ pub fn _mm_mask_cvtepu16_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxwd))] -pub fn _mm_maskz_cvtepu16_epi32(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_cvtepu16_epi32(k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepu16_epi32(a).as_i32x4(); transmute(simd_select_bitmask(k, convert, i32x4::ZERO)) @@ -12134,7 +12528,8 @@ pub fn _mm_maskz_cvtepu16_epi32(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxwq))] -pub fn _mm512_cvtepu16_epi64(a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepu16_epi64(a: __m128i) -> __m512i { unsafe { let a = a.as_u16x8(); transmute::(simd_cast(a)) @@ -12148,7 +12543,8 @@ pub fn _mm512_cvtepu16_epi64(a: __m128i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxwq))] -pub fn _mm512_mask_cvtepu16_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepu16_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __m512i { unsafe { let convert = _mm512_cvtepu16_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, convert, src.as_i64x8())) @@ -12162,7 +12558,8 @@ pub fn _mm512_mask_cvtepu16_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxwq))] -pub fn _mm512_maskz_cvtepu16_epi64(k: __mmask8, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepu16_epi64(k: __mmask8, a: __m128i) -> __m512i { unsafe { let convert 
= _mm512_cvtepu16_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, convert, i64x8::ZERO)) @@ -12176,7 +12573,8 @@ pub fn _mm512_maskz_cvtepu16_epi64(k: __mmask8, a: __m128i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxwq))] -pub fn _mm256_mask_cvtepu16_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cvtepu16_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepu16_epi64(a).as_i64x4(); transmute(simd_select_bitmask(k, convert, src.as_i64x4())) @@ -12190,7 +12588,8 @@ pub fn _mm256_mask_cvtepu16_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxwq))] -pub fn _mm256_maskz_cvtepu16_epi64(k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_cvtepu16_epi64(k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepu16_epi64(a).as_i64x4(); transmute(simd_select_bitmask(k, convert, i64x4::ZERO)) @@ -12204,7 +12603,8 @@ pub fn _mm256_maskz_cvtepu16_epi64(k: __mmask8, a: __m128i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxwq))] -pub fn _mm_mask_cvtepu16_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cvtepu16_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepu16_epi64(a).as_i64x2(); transmute(simd_select_bitmask(k, convert, src.as_i64x2())) @@ -12218,7 +12618,8 @@ pub fn _mm_mask_cvtepu16_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxwq))] -pub fn _mm_maskz_cvtepu16_epi64(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_cvtepu16_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepu16_epi64(a).as_i64x2(); transmute(simd_select_bitmask(k, convert, i64x2::ZERO)) @@ -12232,7 +12633,8 @@ pub fn _mm_maskz_cvtepu16_epi64(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxdq))] -pub fn _mm512_cvtepi32_epi64(a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepi32_epi64(a: __m256i) -> __m512i { unsafe { let a = a.as_i32x8(); transmute::(simd_cast(a)) @@ -12246,7 +12648,8 @@ pub fn _mm512_cvtepi32_epi64(a: __m256i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxdq))] -pub fn _mm512_mask_cvtepi32_epi64(src: __m512i, k: __mmask8, a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepi32_epi64(src: __m512i, k: __mmask8, a: __m256i) -> __m512i { unsafe { let convert = _mm512_cvtepi32_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, 
convert, src.as_i64x8())) @@ -12260,7 +12663,8 @@ pub fn _mm512_mask_cvtepi32_epi64(src: __m512i, k: __mmask8, a: __m256i) -> __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxdq))] -pub fn _mm512_maskz_cvtepi32_epi64(k: __mmask8, a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepi32_epi64(k: __mmask8, a: __m256i) -> __m512i { unsafe { let convert = _mm512_cvtepi32_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, convert, i64x8::ZERO)) @@ -12274,7 +12678,8 @@ pub fn _mm512_maskz_cvtepi32_epi64(k: __mmask8, a: __m256i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxdq))] -pub fn _mm256_mask_cvtepi32_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cvtepi32_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepi32_epi64(a).as_i64x4(); transmute(simd_select_bitmask(k, convert, src.as_i64x4())) @@ -12288,7 +12693,8 @@ pub fn _mm256_mask_cvtepi32_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxdq))] -pub fn _mm256_maskz_cvtepi32_epi64(k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_cvtepi32_epi64(k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepi32_epi64(a).as_i64x4(); transmute(simd_select_bitmask(k, convert, i64x4::ZERO)) @@ -12302,7 +12708,8 @@ pub fn _mm256_maskz_cvtepi32_epi64(k: __mmask8, a: __m128i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxdq))] -pub fn _mm_mask_cvtepi32_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cvtepi32_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepi32_epi64(a).as_i64x2(); transmute(simd_select_bitmask(k, convert, src.as_i64x2())) @@ -12316,7 +12723,8 @@ pub fn _mm_mask_cvtepi32_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovsxdq))] -pub fn _mm_maskz_cvtepi32_epi64(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_cvtepi32_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepi32_epi64(a).as_i64x2(); transmute(simd_select_bitmask(k, convert, i64x2::ZERO)) @@ -12330,7 +12738,8 @@ pub fn _mm_maskz_cvtepi32_epi64(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxdq))] -pub fn _mm512_cvtepu32_epi64(a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepu32_epi64(a: __m256i) -> __m512i { unsafe { let a = a.as_u32x8(); transmute::(simd_cast(a)) @@ -12344,7 +12753,8 @@ pub fn 
_mm512_cvtepu32_epi64(a: __m256i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxdq))] -pub fn _mm512_mask_cvtepu32_epi64(src: __m512i, k: __mmask8, a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepu32_epi64(src: __m512i, k: __mmask8, a: __m256i) -> __m512i { unsafe { let convert = _mm512_cvtepu32_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, convert, src.as_i64x8())) @@ -12358,7 +12768,8 @@ pub fn _mm512_mask_cvtepu32_epi64(src: __m512i, k: __mmask8, a: __m256i) -> __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxdq))] -pub fn _mm512_maskz_cvtepu32_epi64(k: __mmask8, a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepu32_epi64(k: __mmask8, a: __m256i) -> __m512i { unsafe { let convert = _mm512_cvtepu32_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, convert, i64x8::ZERO)) @@ -12372,7 +12783,8 @@ pub fn _mm512_maskz_cvtepu32_epi64(k: __mmask8, a: __m256i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxdq))] -pub fn _mm256_mask_cvtepu32_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cvtepu32_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepu32_epi64(a).as_i64x4(); transmute(simd_select_bitmask(k, convert, src.as_i64x4())) @@ -12386,7 +12798,8 @@ pub fn _mm256_mask_cvtepu32_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxdq))] -pub fn _mm256_maskz_cvtepu32_epi64(k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_cvtepu32_epi64(k: __mmask8, a: __m128i) -> __m256i { unsafe { let convert = _mm256_cvtepu32_epi64(a).as_i64x4(); transmute(simd_select_bitmask(k, convert, i64x4::ZERO)) @@ -12400,7 +12813,8 @@ pub fn _mm256_maskz_cvtepu32_epi64(k: __mmask8, a: __m128i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxdq))] -pub fn _mm_mask_cvtepu32_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cvtepu32_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepu32_epi64(a).as_i64x2(); transmute(simd_select_bitmask(k, convert, src.as_i64x2())) @@ -12414,7 +12828,8 @@ pub fn _mm_mask_cvtepu32_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovzxdq))] -pub fn _mm_maskz_cvtepu32_epi64(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_cvtepu32_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { let convert = _mm_cvtepu32_epi64(a).as_i64x2(); transmute(simd_select_bitmask(k, convert, 
i64x2::ZERO)) @@ -12428,7 +12843,8 @@ pub fn _mm_maskz_cvtepu32_epi64(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2ps))] -pub fn _mm512_cvtepi32_ps(a: __m512i) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepi32_ps(a: __m512i) -> __m512 { unsafe { let a = a.as_i32x16(); transmute::(simd_cast(a)) @@ -12442,7 +12858,8 @@ pub fn _mm512_cvtepi32_ps(a: __m512i) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2ps))] -pub fn _mm512_mask_cvtepi32_ps(src: __m512, k: __mmask16, a: __m512i) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepi32_ps(src: __m512, k: __mmask16, a: __m512i) -> __m512 { unsafe { let convert = _mm512_cvtepi32_ps(a).as_f32x16(); transmute(simd_select_bitmask(k, convert, src.as_f32x16())) @@ -12456,7 +12873,8 @@ pub fn _mm512_mask_cvtepi32_ps(src: __m512, k: __mmask16, a: __m512i) -> __m512 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2ps))] -pub fn _mm512_maskz_cvtepi32_ps(k: __mmask16, a: __m512i) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepi32_ps(k: __mmask16, a: __m512i) -> __m512 { unsafe { let convert = _mm512_cvtepi32_ps(a).as_f32x16(); transmute(simd_select_bitmask(k, convert, f32x16::ZERO)) @@ -12470,7 +12888,8 @@ pub fn _mm512_maskz_cvtepi32_ps(k: __mmask16, a: __m512i) -> __m512 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2ps))] -pub fn _mm256_mask_cvtepi32_ps(src: __m256, k: __mmask8, a: __m256i) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cvtepi32_ps(src: __m256, k: __mmask8, a: __m256i) -> __m256 { unsafe { let convert = _mm256_cvtepi32_ps(a).as_f32x8(); transmute(simd_select_bitmask(k, convert, src.as_f32x8())) @@ -12484,7 +12903,8 @@ pub fn _mm256_mask_cvtepi32_ps(src: __m256, k: __mmask8, a: __m256i) -> __m256 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2ps))] -pub fn _mm256_maskz_cvtepi32_ps(k: __mmask8, a: __m256i) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_cvtepi32_ps(k: __mmask8, a: __m256i) -> __m256 { unsafe { let convert = _mm256_cvtepi32_ps(a).as_f32x8(); transmute(simd_select_bitmask(k, convert, f32x8::ZERO)) @@ -12498,7 +12918,8 @@ pub fn _mm256_maskz_cvtepi32_ps(k: __mmask8, a: __m256i) -> __m256 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2ps))] -pub fn _mm_mask_cvtepi32_ps(src: __m128, k: __mmask8, a: __m128i) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cvtepi32_ps(src: __m128, k: __mmask8, a: __m128i) -> __m128 { unsafe { let convert = _mm_cvtepi32_ps(a).as_f32x4(); transmute(simd_select_bitmask(k, convert, src.as_f32x4())) @@ -12512,7 +12933,8 @@ pub fn _mm_mask_cvtepi32_ps(src: __m128, k: __mmask8, a: __m128i) -> __m128 { #[target_feature(enable = 
"avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2ps))] -pub fn _mm_maskz_cvtepi32_ps(k: __mmask8, a: __m128i) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_cvtepi32_ps(k: __mmask8, a: __m128i) -> __m128 { unsafe { let convert = _mm_cvtepi32_ps(a).as_f32x4(); transmute(simd_select_bitmask(k, convert, f32x4::ZERO)) @@ -12526,7 +12948,8 @@ pub fn _mm_maskz_cvtepi32_ps(k: __mmask8, a: __m128i) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2pd))] -pub fn _mm512_cvtepi32_pd(a: __m256i) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepi32_pd(a: __m256i) -> __m512d { unsafe { let a = a.as_i32x8(); transmute::(simd_cast(a)) @@ -12540,7 +12963,8 @@ pub fn _mm512_cvtepi32_pd(a: __m256i) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2pd))] -pub fn _mm512_mask_cvtepi32_pd(src: __m512d, k: __mmask8, a: __m256i) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepi32_pd(src: __m512d, k: __mmask8, a: __m256i) -> __m512d { unsafe { let convert = _mm512_cvtepi32_pd(a).as_f64x8(); transmute(simd_select_bitmask(k, convert, src.as_f64x8())) @@ -12554,7 +12978,8 @@ pub fn _mm512_mask_cvtepi32_pd(src: __m512d, k: __mmask8, a: __m256i) -> __m512d #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2pd))] -pub fn _mm512_maskz_cvtepi32_pd(k: __mmask8, a: __m256i) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepi32_pd(k: __mmask8, a: __m256i) -> __m512d { unsafe { let convert = _mm512_cvtepi32_pd(a).as_f64x8(); transmute(simd_select_bitmask(k, convert, f64x8::ZERO)) @@ -12568,7 +12993,8 @@ pub fn _mm512_maskz_cvtepi32_pd(k: __mmask8, a: __m256i) -> __m512d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2pd))] -pub fn _mm256_mask_cvtepi32_pd(src: __m256d, k: __mmask8, a: __m128i) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cvtepi32_pd(src: __m256d, k: __mmask8, a: __m128i) -> __m256d { unsafe { let convert = _mm256_cvtepi32_pd(a).as_f64x4(); transmute(simd_select_bitmask(k, convert, src.as_f64x4())) @@ -12582,7 +13008,8 @@ pub fn _mm256_mask_cvtepi32_pd(src: __m256d, k: __mmask8, a: __m128i) -> __m256d #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2pd))] -pub fn _mm256_maskz_cvtepi32_pd(k: __mmask8, a: __m128i) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_cvtepi32_pd(k: __mmask8, a: __m128i) -> __m256d { unsafe { let convert = _mm256_cvtepi32_pd(a).as_f64x4(); transmute(simd_select_bitmask(k, convert, f64x4::ZERO)) @@ -12596,7 +13023,8 @@ pub fn _mm256_maskz_cvtepi32_pd(k: __mmask8, a: __m128i) -> __m256d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2pd))] -pub fn _mm_mask_cvtepi32_pd(src: __m128d, k: 
__mmask8, a: __m128i) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cvtepi32_pd(src: __m128d, k: __mmask8, a: __m128i) -> __m128d { unsafe { let convert = _mm_cvtepi32_pd(a).as_f64x2(); transmute(simd_select_bitmask(k, convert, src.as_f64x2())) @@ -12610,7 +13038,8 @@ pub fn _mm_mask_cvtepi32_pd(src: __m128d, k: __mmask8, a: __m128i) -> __m128d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2pd))] -pub fn _mm_maskz_cvtepi32_pd(k: __mmask8, a: __m128i) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_cvtepi32_pd(k: __mmask8, a: __m128i) -> __m128d { unsafe { let convert = _mm_cvtepi32_pd(a).as_f64x2(); transmute(simd_select_bitmask(k, convert, f64x2::ZERO)) @@ -12624,7 +13053,8 @@ pub fn _mm_maskz_cvtepi32_pd(k: __mmask8, a: __m128i) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtudq2ps))] -pub fn _mm512_cvtepu32_ps(a: __m512i) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepu32_ps(a: __m512i) -> __m512 { unsafe { let a = a.as_u32x16(); transmute::(simd_cast(a)) @@ -12638,7 +13068,8 @@ pub fn _mm512_cvtepu32_ps(a: __m512i) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtudq2ps))] -pub fn _mm512_mask_cvtepu32_ps(src: __m512, k: __mmask16, a: __m512i) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepu32_ps(src: __m512, k: __mmask16, a: __m512i) -> __m512 { unsafe { let convert = _mm512_cvtepu32_ps(a).as_f32x16(); transmute(simd_select_bitmask(k, convert, src.as_f32x16())) @@ -12652,7 +13083,8 @@ pub fn _mm512_mask_cvtepu32_ps(src: __m512, k: __mmask16, a: __m512i) -> __m512 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtudq2ps))] -pub fn _mm512_maskz_cvtepu32_ps(k: __mmask16, a: __m512i) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepu32_ps(k: __mmask16, a: __m512i) -> __m512 { unsafe { let convert = _mm512_cvtepu32_ps(a).as_f32x16(); transmute(simd_select_bitmask(k, convert, f32x16::ZERO)) @@ -12666,7 +13098,8 @@ pub fn _mm512_maskz_cvtepu32_ps(k: __mmask16, a: __m512i) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] -pub fn _mm512_cvtepu32_pd(a: __m256i) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepu32_pd(a: __m256i) -> __m512d { unsafe { let a = a.as_u32x8(); transmute::(simd_cast(a)) @@ -12680,7 +13113,8 @@ pub fn _mm512_cvtepu32_pd(a: __m256i) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] -pub fn _mm512_mask_cvtepu32_pd(src: __m512d, k: __mmask8, a: __m256i) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepu32_pd(src: __m512d, k: __mmask8, a: __m256i) -> __m512d { unsafe { let convert = _mm512_cvtepu32_pd(a).as_f64x8(); transmute(simd_select_bitmask(k, convert, 
src.as_f64x8())) @@ -12694,7 +13128,8 @@ pub fn _mm512_mask_cvtepu32_pd(src: __m512d, k: __mmask8, a: __m256i) -> __m512d #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] -pub fn _mm512_maskz_cvtepu32_pd(k: __mmask8, a: __m256i) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepu32_pd(k: __mmask8, a: __m256i) -> __m512d { unsafe { let convert = _mm512_cvtepu32_pd(a).as_f64x8(); transmute(simd_select_bitmask(k, convert, f64x8::ZERO)) @@ -12708,7 +13143,8 @@ pub fn _mm512_maskz_cvtepu32_pd(k: __mmask8, a: __m256i) -> __m512d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] -pub fn _mm256_cvtepu32_pd(a: __m128i) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtepu32_pd(a: __m128i) -> __m256d { unsafe { let a = a.as_u32x4(); transmute::(simd_cast(a)) @@ -12722,7 +13158,8 @@ pub fn _mm256_cvtepu32_pd(a: __m128i) -> __m256d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] -pub fn _mm256_mask_cvtepu32_pd(src: __m256d, k: __mmask8, a: __m128i) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cvtepu32_pd(src: __m256d, k: __mmask8, a: __m128i) -> __m256d { unsafe { let convert = _mm256_cvtepu32_pd(a).as_f64x4(); transmute(simd_select_bitmask(k, convert, src.as_f64x4())) @@ -12736,7 +13173,8 @@ pub fn _mm256_mask_cvtepu32_pd(src: __m256d, k: __mmask8, a: __m128i) -> __m256d #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] -pub fn _mm256_maskz_cvtepu32_pd(k: __mmask8, a: __m128i) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_cvtepu32_pd(k: __mmask8, a: __m128i) -> __m256d { unsafe { let convert = _mm256_cvtepu32_pd(a).as_f64x4(); transmute(simd_select_bitmask(k, convert, f64x4::ZERO)) @@ -12750,7 +13188,8 @@ pub fn _mm256_maskz_cvtepu32_pd(k: __mmask8, a: __m128i) -> __m256d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] -pub fn _mm_cvtepu32_pd(a: __m128i) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtepu32_pd(a: __m128i) -> __m128d { unsafe { let a = a.as_u32x4(); let u64: u32x2 = simd_shuffle!(a, a, [0, 1]); @@ -12765,7 +13204,8 @@ pub fn _mm_cvtepu32_pd(a: __m128i) -> __m128d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] -pub fn _mm_mask_cvtepu32_pd(src: __m128d, k: __mmask8, a: __m128i) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cvtepu32_pd(src: __m128d, k: __mmask8, a: __m128i) -> __m128d { unsafe { let convert = _mm_cvtepu32_pd(a).as_f64x2(); transmute(simd_select_bitmask(k, convert, src.as_f64x2())) @@ -12779,7 +13219,8 @@ pub fn _mm_mask_cvtepu32_pd(src: __m128d, k: __mmask8, a: __m128i) -> __m128d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] 
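// The `_mask_*` / `_maskz_*` conversions in these hunks all follow one pattern:
// `simd_select_bitmask(k, converted, fallback)`, where bit i of `k` keeps lane i
// of the converted vector and the fallback supplies the remaining lanes
// (`src` for `_mask_*`, all-zeros for `_maskz_*`). A scalar sketch of that
// selection, using a hypothetical stand-alone helper (not a stdarch API):
fn select_bitmask_model(k: u8, converted: [i64; 4], fallback: [i64; 4]) -> [i64; 4] {
    let mut out = fallback;
    for i in 0..4 {
        // bit i of the mask chooses the converted lane over the fallback lane
        if (k >> i) & 1 == 1 {
            out[i] = converted[i];
        }
    }
    out
}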
#[cfg_attr(test, assert_instr(vcvtudq2pd))] -pub fn _mm_maskz_cvtepu32_pd(k: __mmask8, a: __m128i) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_cvtepu32_pd(k: __mmask8, a: __m128i) -> __m128d { unsafe { let convert = _mm_cvtepu32_pd(a).as_f64x2(); transmute(simd_select_bitmask(k, convert, f64x2::ZERO)) @@ -12793,7 +13234,8 @@ pub fn _mm_maskz_cvtepu32_pd(k: __mmask8, a: __m128i) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2pd))] -pub fn _mm512_cvtepi32lo_pd(v2: __m512i) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepi32lo_pd(v2: __m512i) -> __m512d { unsafe { let v2 = v2.as_i32x16(); let v256: i32x8 = simd_shuffle!(v2, v2, [0, 1, 2, 3, 4, 5, 6, 7]); @@ -12808,7 +13250,8 @@ pub fn _mm512_cvtepi32lo_pd(v2: __m512i) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtdq2pd))] -pub fn _mm512_mask_cvtepi32lo_pd(src: __m512d, k: __mmask8, v2: __m512i) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepi32lo_pd(src: __m512d, k: __mmask8, v2: __m512i) -> __m512d { unsafe { let convert = _mm512_cvtepi32lo_pd(v2).as_f64x8(); transmute(simd_select_bitmask(k, convert, src.as_f64x8())) @@ -12822,7 +13265,8 @@ pub fn _mm512_mask_cvtepi32lo_pd(src: __m512d, k: __mmask8, v2: __m512i) -> __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] -pub fn _mm512_cvtepu32lo_pd(v2: __m512i) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepu32lo_pd(v2: __m512i) -> __m512d { unsafe { let v2 = v2.as_u32x16(); let v256: u32x8 = simd_shuffle!(v2, v2, [0, 1, 2, 3, 4, 5, 6, 7]); @@ -12837,7 +13281,8 @@ pub fn _mm512_cvtepu32lo_pd(v2: __m512i) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] -pub fn _mm512_mask_cvtepu32lo_pd(src: __m512d, k: __mmask8, v2: __m512i) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepu32lo_pd(src: __m512d, k: __mmask8, v2: __m512i) -> __m512d { unsafe { let convert = _mm512_cvtepu32lo_pd(v2).as_f64x8(); transmute(simd_select_bitmask(k, convert, src.as_f64x8())) @@ -12851,7 +13296,8 @@ pub fn _mm512_mask_cvtepu32lo_pd(src: __m512d, k: __mmask8, v2: __m512i) -> __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovdw))] -pub fn _mm512_cvtepi32_epi16(a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepi32_epi16(a: __m512i) -> __m256i { unsafe { let a = a.as_i32x16(); transmute::(simd_cast(a)) @@ -12865,7 +13311,8 @@ pub fn _mm512_cvtepi32_epi16(a: __m512i) -> __m256i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovdw))] -pub fn _mm512_mask_cvtepi32_epi16(src: __m256i, k: __mmask16, a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepi32_epi16(src: __m256i, k: __mmask16, a: 
__m512i) -> __m256i { unsafe { let convert = _mm512_cvtepi32_epi16(a).as_i16x16(); transmute(simd_select_bitmask(k, convert, src.as_i16x16())) @@ -12879,7 +13326,8 @@ pub fn _mm512_mask_cvtepi32_epi16(src: __m256i, k: __mmask16, a: __m512i) -> __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovdw))] -pub fn _mm512_maskz_cvtepi32_epi16(k: __mmask16, a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepi32_epi16(k: __mmask16, a: __m512i) -> __m256i { unsafe { let convert = _mm512_cvtepi32_epi16(a).as_i16x16(); transmute(simd_select_bitmask(k, convert, i16x16::ZERO)) @@ -12893,7 +13341,8 @@ pub fn _mm512_maskz_cvtepi32_epi16(k: __mmask16, a: __m512i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovdw))] -pub fn _mm256_cvtepi32_epi16(a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtepi32_epi16(a: __m256i) -> __m128i { unsafe { let a = a.as_i32x8(); transmute::(simd_cast(a)) @@ -12907,7 +13356,8 @@ pub fn _mm256_cvtepi32_epi16(a: __m256i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovdw))] -pub fn _mm256_mask_cvtepi32_epi16(src: __m128i, k: __mmask8, a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cvtepi32_epi16(src: __m128i, k: __mmask8, a: __m256i) -> __m128i { unsafe { let convert = _mm256_cvtepi32_epi16(a).as_i16x8(); transmute(simd_select_bitmask(k, convert, src.as_i16x8())) @@ -12921,7 +13371,8 @@ pub fn _mm256_mask_cvtepi32_epi16(src: __m128i, k: __mmask8, a: __m256i) -> __m1 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovdw))] -pub fn _mm256_maskz_cvtepi32_epi16(k: __mmask8, a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_cvtepi32_epi16(k: __mmask8, a: __m256i) -> __m128i { unsafe { let convert = _mm256_cvtepi32_epi16(a).as_i16x8(); transmute(simd_select_bitmask(k, convert, i16x8::ZERO)) @@ -12968,7 +13419,8 @@ pub fn _mm_maskz_cvtepi32_epi16(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovdb))] -pub fn _mm512_cvtepi32_epi8(a: __m512i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepi32_epi8(a: __m512i) -> __m128i { unsafe { let a = a.as_i32x16(); transmute::(simd_cast(a)) @@ -12982,7 +13434,8 @@ pub fn _mm512_cvtepi32_epi8(a: __m512i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovdb))] -pub fn _mm512_mask_cvtepi32_epi8(src: __m128i, k: __mmask16, a: __m512i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepi32_epi8(src: __m128i, k: __mmask16, a: __m512i) -> __m128i { unsafe { let convert = _mm512_cvtepi32_epi8(a).as_i8x16(); transmute(simd_select_bitmask(k, convert, src.as_i8x16())) @@ -12996,7 +13449,8 @@ pub fn _mm512_mask_cvtepi32_epi8(src: __m128i, k: 
__mmask16, a: __m512i) -> __m1 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovdb))] -pub fn _mm512_maskz_cvtepi32_epi8(k: __mmask16, a: __m512i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepi32_epi8(k: __mmask16, a: __m512i) -> __m128i { unsafe { let convert = _mm512_cvtepi32_epi8(a).as_i8x16(); transmute(simd_select_bitmask(k, convert, i8x16::ZERO)) @@ -13076,7 +13530,8 @@ pub fn _mm_maskz_cvtepi32_epi8(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovqd))] -pub fn _mm512_cvtepi64_epi32(a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepi64_epi32(a: __m512i) -> __m256i { unsafe { let a = a.as_i64x8(); transmute::(simd_cast(a)) @@ -13090,7 +13545,8 @@ pub fn _mm512_cvtepi64_epi32(a: __m512i) -> __m256i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovqd))] -pub fn _mm512_mask_cvtepi64_epi32(src: __m256i, k: __mmask8, a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepi64_epi32(src: __m256i, k: __mmask8, a: __m512i) -> __m256i { unsafe { let convert = _mm512_cvtepi64_epi32(a).as_i32x8(); transmute(simd_select_bitmask(k, convert, src.as_i32x8())) @@ -13104,7 +13560,8 @@ pub fn _mm512_mask_cvtepi64_epi32(src: __m256i, k: __mmask8, a: __m512i) -> __m2 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovqd))] -pub fn _mm512_maskz_cvtepi64_epi32(k: __mmask8, a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepi64_epi32(k: __mmask8, a: __m512i) -> __m256i { unsafe { let convert = _mm512_cvtepi64_epi32(a).as_i32x8(); transmute(simd_select_bitmask(k, convert, i32x8::ZERO)) @@ -13118,7 +13575,8 @@ pub fn _mm512_maskz_cvtepi64_epi32(k: __mmask8, a: __m512i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovqd))] -pub fn _mm256_cvtepi64_epi32(a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtepi64_epi32(a: __m256i) -> __m128i { unsafe { let a = a.as_i64x4(); transmute::(simd_cast(a)) @@ -13132,7 +13590,8 @@ pub fn _mm256_cvtepi64_epi32(a: __m256i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovqd))] -pub fn _mm256_mask_cvtepi64_epi32(src: __m128i, k: __mmask8, a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cvtepi64_epi32(src: __m128i, k: __mmask8, a: __m256i) -> __m128i { unsafe { let convert = _mm256_cvtepi64_epi32(a).as_i32x4(); transmute(simd_select_bitmask(k, convert, src.as_i32x4())) @@ -13146,7 +13605,8 @@ pub fn _mm256_mask_cvtepi64_epi32(src: __m128i, k: __mmask8, a: __m256i) -> __m1 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovqd))] -pub fn _mm256_maskz_cvtepi64_epi32(k: __mmask8, a: 
__m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_cvtepi64_epi32(k: __mmask8, a: __m256i) -> __m128i { unsafe { let convert = _mm256_cvtepi64_epi32(a).as_i32x4(); transmute(simd_select_bitmask(k, convert, i32x4::ZERO)) @@ -13193,7 +13653,8 @@ pub fn _mm_maskz_cvtepi64_epi32(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovqw))] -pub fn _mm512_cvtepi64_epi16(a: __m512i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtepi64_epi16(a: __m512i) -> __m128i { unsafe { let a = a.as_i64x8(); transmute::(simd_cast(a)) @@ -13207,7 +13668,8 @@ pub fn _mm512_cvtepi64_epi16(a: __m512i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovqw))] -pub fn _mm512_mask_cvtepi64_epi16(src: __m128i, k: __mmask8, a: __m512i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cvtepi64_epi16(src: __m128i, k: __mmask8, a: __m512i) -> __m128i { unsafe { let convert = _mm512_cvtepi64_epi16(a).as_i16x8(); transmute(simd_select_bitmask(k, convert, src.as_i16x8())) @@ -13221,7 +13683,8 @@ pub fn _mm512_mask_cvtepi64_epi16(src: __m128i, k: __mmask8, a: __m512i) -> __m1 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpmovqw))] -pub fn _mm512_maskz_cvtepi64_epi16(k: __mmask8, a: __m512i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_cvtepi64_epi16(k: __mmask8, a: __m512i) -> __m128i { unsafe { let convert = _mm512_cvtepi64_epi16(a).as_i16x8(); transmute(simd_select_bitmask(k, convert, i16x8::ZERO)) @@ -16178,7 +16641,8 @@ pub fn _mm_maskz_cvttpd_epu32(k: __mmask8, a: __m128d) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vxorps))] -pub fn _mm512_setzero_pd() -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_setzero_pd() -> __m512d { // All-0 is a properly initialized __m512d unsafe { const { mem::zeroed() } } } @@ -16190,7 +16654,8 @@ pub fn _mm512_setzero_pd() -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vxorps))] -pub fn _mm512_setzero_ps() -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_setzero_ps() -> __m512 { // All-0 is a properly initialized __m512 unsafe { const { mem::zeroed() } } } @@ -16202,7 +16667,8 @@ pub fn _mm512_setzero_ps() -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vxorps))] -pub fn _mm512_setzero() -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_setzero() -> __m512 { // All-0 is a properly initialized __m512 unsafe { const { mem::zeroed() } } } @@ -16214,7 +16680,8 @@ pub fn _mm512_setzero() -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vxorps))] -pub fn _mm512_setzero_si512() -> __m512i { 
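// Sketch of the intended const usage once the `stdarch_const_x86` feature above
// is available on nightly; whether `unsafe` is still required in const context
// for a `#[target_feature]` const fn is an assumption of this sketch:
#![feature(stdarch_const_x86)]
use core::arch::x86_64::{__m512i, _mm512_setzero_si512};

// an all-zero vector built at compile time instead of via a runtime vxorps
const ZERO512: __m512i = unsafe { _mm512_setzero_si512() };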
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_setzero_si512() -> __m512i { // All-0 is a properly initialized __m512i unsafe { const { mem::zeroed() } } } @@ -16226,7 +16693,8 @@ pub fn _mm512_setzero_si512() -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vxorps))] -pub fn _mm512_setzero_epi32() -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_setzero_epi32() -> __m512i { // All-0 is a properly initialized __m512i unsafe { const { mem::zeroed() } } } @@ -16238,7 +16706,8 @@ pub fn _mm512_setzero_epi32() -> __m512i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_setr_epi32( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_setr_epi32( e15: i32, e14: i32, e13: i32, @@ -16270,7 +16739,8 @@ pub fn _mm512_setr_epi32( #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_set_epi8( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_set_epi8( e63: i8, e62: i8, e61: i8, @@ -16353,7 +16823,8 @@ pub fn _mm512_set_epi8( #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_set_epi16( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_set_epi16( e31: i16, e30: i16, e29: i16, @@ -16402,7 +16873,8 @@ pub fn _mm512_set_epi16( #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_set4_epi32(d: i32, c: i32, b: i32, a: i32) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_set4_epi32(d: i32, c: i32, b: i32, a: i32) -> __m512i { _mm512_set_epi32(d, c, b, a, d, c, b, a, d, c, b, a, d, c, b, a) } @@ -16412,7 +16884,8 @@ pub fn _mm512_set4_epi32(d: i32, c: i32, b: i32, a: i32) -> __m512i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_set4_ps(d: f32, c: f32, b: f32, a: f32) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_set4_ps(d: f32, c: f32, b: f32, a: f32) -> __m512 { _mm512_set_ps(d, c, b, a, d, c, b, a, d, c, b, a, d, c, b, a) } @@ -16422,7 +16895,8 @@ pub fn _mm512_set4_ps(d: f32, c: f32, b: f32, a: f32) -> __m512 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_set4_pd(d: f64, c: f64, b: f64, a: f64) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_set4_pd(d: f64, c: f64, b: f64, a: f64) -> __m512d { _mm512_set_pd(d, c, b, a, d, c, b, a) } @@ -16432,7 +16906,8 @@ pub fn _mm512_set4_pd(d: f64, c: f64, b: f64, a: f64) -> __m512d { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_setr4_epi32(d: i32, c: i32, b: i32, a: i32) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_setr4_epi32(d: i32, c: i32, b: i32, a: i32) -> __m512i { _mm512_set_epi32(a, b, c, d, a, b, c, d, a, b, c, d, a, b, c, d) } @@ -16442,7 +16917,8 @@ pub fn _mm512_setr4_epi32(d: i32, c: i32, b: 
i32, a: i32) -> __m512i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_setr4_ps(d: f32, c: f32, b: f32, a: f32) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_setr4_ps(d: f32, c: f32, b: f32, a: f32) -> __m512 { _mm512_set_ps(a, b, c, d, a, b, c, d, a, b, c, d, a, b, c, d) } @@ -16452,7 +16928,8 @@ pub fn _mm512_setr4_ps(d: f32, c: f32, b: f32, a: f32) -> __m512 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_setr4_pd(d: f64, c: f64, b: f64, a: f64) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_setr4_pd(d: f64, c: f64, b: f64, a: f64) -> __m512d { _mm512_set_pd(a, b, c, d, a, b, c, d) } @@ -16462,7 +16939,8 @@ pub fn _mm512_setr4_pd(d: f64, c: f64, b: f64, a: f64) -> __m512d { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_set_epi64( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_set_epi64( e0: i64, e1: i64, e2: i64, @@ -16481,7 +16959,8 @@ pub fn _mm512_set_epi64( #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_setr_epi64( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_setr_epi64( e0: i64, e1: i64, e2: i64, @@ -19006,7 +19485,8 @@ pub fn _mm_maskz_expand_pd(k: __mmask8, a: __m128d) -> __m128d { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_rol_epi32(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_rol_epi32(a: __m512i) -> __m512i { static_assert_uimm_bits!(IMM8, 8); _mm512_rolv_epi32(a, _mm512_set1_epi32(IMM8)) } @@ -19019,7 +19499,12 @@ pub fn _mm512_rol_epi32(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_rol_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_rol_epi32( + src: __m512i, + k: __mmask16, + a: __m512i, +) -> __m512i { static_assert_uimm_bits!(IMM8, 8); _mm512_mask_rolv_epi32(src, k, a, _mm512_set1_epi32(IMM8)) } @@ -19032,7 +19517,8 @@ pub fn _mm512_mask_rol_epi32(src: __m512i, k: __mmask16, a: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_rol_epi32(k: __mmask16, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_rol_epi32(k: __mmask16, a: __m512i) -> __m512i { static_assert_uimm_bits!(IMM8, 8); _mm512_maskz_rolv_epi32(k, a, _mm512_set1_epi32(IMM8)) } @@ -19045,7 +19531,8 @@ pub fn _mm512_maskz_rol_epi32(k: __mmask16, a: __m512i) -> __m5 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(1)] -pub fn _mm256_rol_epi32(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_rol_epi32(a: __m256i) -> __m256i { 
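// `IMM8` is a const generic immediate (exposed positionally through
// `#[rustc_legacy_const_generics(...)]`); the `static_assert_uimm_bits!` check
// that follows rejects, at compile time, any rotate count that does not fit in
// an unsigned 8-bit immediate.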
static_assert_uimm_bits!(IMM8, 8); _mm256_rolv_epi32(a, _mm256_set1_epi32(IMM8)) } @@ -19058,7 +19545,12 @@ pub fn _mm256_rol_epi32(a: __m256i) -> __m256i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_rol_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_rol_epi32( + src: __m256i, + k: __mmask8, + a: __m256i, +) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_mask_rolv_epi32(src, k, a, _mm256_set1_epi32(IMM8)) } @@ -19071,7 +19563,8 @@ pub fn _mm256_mask_rol_epi32(src: __m256i, k: __mmask8, a: __m2 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_rol_epi32(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_rol_epi32(k: __mmask8, a: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_maskz_rolv_epi32(k, a, _mm256_set1_epi32(IMM8)) } @@ -19084,7 +19577,8 @@ pub fn _mm256_maskz_rol_epi32(k: __mmask8, a: __m256i) -> __m25 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(1)] -pub fn _mm_rol_epi32(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_rol_epi32(a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); _mm_rolv_epi32(a, _mm_set1_epi32(IMM8)) } @@ -19097,7 +19591,8 @@ pub fn _mm_rol_epi32(a: __m128i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm_mask_rol_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_rol_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); _mm_mask_rolv_epi32(src, k, a, _mm_set1_epi32(IMM8)) } @@ -19110,7 +19605,8 @@ pub fn _mm_mask_rol_epi32(src: __m128i, k: __mmask8, a: __m128i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm_maskz_rol_epi32(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_rol_epi32(k: __mmask8, a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); _mm_maskz_rolv_epi32(k, a, _mm_set1_epi32(IMM8)) } @@ -19123,7 +19619,8 @@ pub fn _mm_maskz_rol_epi32(k: __mmask8, a: __m128i) -> __m128i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_ror_epi32(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_ror_epi32(a: __m512i) -> __m512i { static_assert_uimm_bits!(IMM8, 8); _mm512_rorv_epi32(a, _mm512_set1_epi32(IMM8)) } @@ -19136,7 +19633,12 @@ pub fn _mm512_ror_epi32(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_ror_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { 
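// With the same crate-level feature gate as in the earlier sketch, a rotate by a
// fixed immediate can likewise be folded into a compile-time constant (assuming
// `_mm512_set1_epi32` is const-enabled elsewhere in this series):
use core::arch::x86_64::{__m512i, _mm512_rol_epi32, _mm512_set1_epi32};

// every lane of the splatted vector rotated left by 7 bits, evaluated at compile time
const ROTATED: __m512i = unsafe { _mm512_rol_epi32::<7>(_mm512_set1_epi32(1)) };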
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_ror_epi32( + src: __m512i, + k: __mmask16, + a: __m512i, +) -> __m512i { static_assert_uimm_bits!(IMM8, 8); _mm512_mask_rorv_epi32(src, k, a, _mm512_set1_epi32(IMM8)) } @@ -19149,7 +19651,8 @@ pub fn _mm512_mask_ror_epi32(src: __m512i, k: __mmask16, a: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_ror_epi32(k: __mmask16, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_ror_epi32(k: __mmask16, a: __m512i) -> __m512i { static_assert_uimm_bits!(IMM8, 8); _mm512_maskz_rorv_epi32(k, a, _mm512_set1_epi32(IMM8)) } @@ -19162,7 +19665,8 @@ pub fn _mm512_maskz_ror_epi32(k: __mmask16, a: __m512i) -> __m5 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(1)] -pub fn _mm256_ror_epi32(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_ror_epi32(a: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_rorv_epi32(a, _mm256_set1_epi32(IMM8)) } @@ -19175,7 +19679,12 @@ pub fn _mm256_ror_epi32(a: __m256i) -> __m256i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_ror_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_ror_epi32( + src: __m256i, + k: __mmask8, + a: __m256i, +) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_mask_rorv_epi32(src, k, a, _mm256_set1_epi32(IMM8)) } @@ -19188,7 +19697,8 @@ pub fn _mm256_mask_ror_epi32(src: __m256i, k: __mmask8, a: __m2 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_ror_epi32(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_ror_epi32(k: __mmask8, a: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_maskz_rorv_epi32(k, a, _mm256_set1_epi32(IMM8)) } @@ -19201,7 +19711,8 @@ pub fn _mm256_maskz_ror_epi32(k: __mmask8, a: __m256i) -> __m25 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(1)] -pub fn _mm_ror_epi32(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_ror_epi32(a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); _mm_rorv_epi32(a, _mm_set1_epi32(IMM8)) } @@ -19214,7 +19725,8 @@ pub fn _mm_ror_epi32(a: __m128i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] #[rustc_legacy_const_generics(3)] -pub fn _mm_mask_ror_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_ror_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); _mm_mask_rorv_epi32(src, k, a, _mm_set1_epi32(IMM8)) } @@ -19227,7 +19739,8 @@ pub fn _mm_mask_ror_epi32(src: __m128i, k: __mmask8, a: __m128i #[stable(feature = 
"stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] #[rustc_legacy_const_generics(2)] -pub fn _mm_maskz_ror_epi32(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_ror_epi32(k: __mmask8, a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); _mm_maskz_rorv_epi32(k, a, _mm_set1_epi32(IMM8)) } @@ -19240,7 +19753,8 @@ pub fn _mm_maskz_ror_epi32(k: __mmask8, a: __m128i) -> __m128i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_rol_epi64(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_rol_epi64(a: __m512i) -> __m512i { static_assert_uimm_bits!(IMM8, 8); _mm512_rolv_epi64(a, _mm512_set1_epi64(IMM8 as i64)) } @@ -19253,7 +19767,12 @@ pub fn _mm512_rol_epi64(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_rol_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_rol_epi64( + src: __m512i, + k: __mmask8, + a: __m512i, +) -> __m512i { static_assert_uimm_bits!(IMM8, 8); _mm512_mask_rolv_epi64(src, k, a, _mm512_set1_epi64(IMM8 as i64)) } @@ -19266,7 +19785,8 @@ pub fn _mm512_mask_rol_epi64(src: __m512i, k: __mmask8, a: __m5 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_rol_epi64(k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_rol_epi64(k: __mmask8, a: __m512i) -> __m512i { static_assert_uimm_bits!(IMM8, 8); _mm512_maskz_rolv_epi64(k, a, _mm512_set1_epi64(IMM8 as i64)) } @@ -19279,7 +19799,8 @@ pub fn _mm512_maskz_rol_epi64(k: __mmask8, a: __m512i) -> __m51 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] -pub fn _mm256_rol_epi64(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_rol_epi64(a: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_rolv_epi64(a, _mm256_set1_epi64x(IMM8 as i64)) } @@ -19292,7 +19813,12 @@ pub fn _mm256_rol_epi64(a: __m256i) -> __m256i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_rol_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_rol_epi64( + src: __m256i, + k: __mmask8, + a: __m256i, +) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_mask_rolv_epi64(src, k, a, _mm256_set1_epi64x(IMM8 as i64)) } @@ -19305,7 +19831,8 @@ pub fn _mm256_mask_rol_epi64(src: __m256i, k: __mmask8, a: __m2 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_rol_epi64(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_rol_epi64(k: __mmask8, a: __m256i) -> 
__m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_maskz_rolv_epi64(k, a, _mm256_set1_epi64x(IMM8 as i64)) } @@ -19318,7 +19845,8 @@ pub fn _mm256_maskz_rol_epi64(k: __mmask8, a: __m256i) -> __m25 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] -pub fn _mm_rol_epi64(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_rol_epi64(a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); _mm_rolv_epi64(a, _mm_set1_epi64x(IMM8 as i64)) } @@ -19331,7 +19859,8 @@ pub fn _mm_rol_epi64(a: __m128i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm_mask_rol_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_rol_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); _mm_mask_rolv_epi64(src, k, a, _mm_set1_epi64x(IMM8 as i64)) } @@ -19344,7 +19873,8 @@ pub fn _mm_mask_rol_epi64(src: __m128i, k: __mmask8, a: __m128i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm_maskz_rol_epi64(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_rol_epi64(k: __mmask8, a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); _mm_maskz_rolv_epi64(k, a, _mm_set1_epi64x(IMM8 as i64)) } @@ -19357,7 +19887,8 @@ pub fn _mm_maskz_rol_epi64(k: __mmask8, a: __m128i) -> __m128i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_ror_epi64(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_ror_epi64(a: __m512i) -> __m512i { static_assert_uimm_bits!(IMM8, 8); _mm512_rorv_epi64(a, _mm512_set1_epi64(IMM8 as i64)) } @@ -19370,7 +19901,12 @@ pub fn _mm512_ror_epi64(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_ror_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_ror_epi64( + src: __m512i, + k: __mmask8, + a: __m512i, +) -> __m512i { static_assert_uimm_bits!(IMM8, 8); _mm512_mask_rorv_epi64(src, k, a, _mm512_set1_epi64(IMM8 as i64)) } @@ -19383,7 +19919,8 @@ pub fn _mm512_mask_ror_epi64(src: __m512i, k: __mmask8, a: __m5 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_ror_epi64(k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_ror_epi64(k: __mmask8, a: __m512i) -> __m512i { static_assert_uimm_bits!(IMM8, 8); _mm512_maskz_rorv_epi64(k, a, _mm512_set1_epi64(IMM8 as i64)) } @@ -19396,7 +19933,8 @@ pub fn _mm512_maskz_ror_epi64(k: __mmask8, a: __m512i) -> __m51 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(1)] -pub fn 
_mm256_ror_epi64(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_ror_epi64(a: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_rorv_epi64(a, _mm256_set1_epi64x(IMM8 as i64)) } @@ -19409,7 +19947,12 @@ pub fn _mm256_ror_epi64(a: __m256i) -> __m256i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_ror_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_ror_epi64( + src: __m256i, + k: __mmask8, + a: __m256i, +) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_mask_rorv_epi64(src, k, a, _mm256_set1_epi64x(IMM8 as i64)) } @@ -19422,7 +19965,8 @@ pub fn _mm256_mask_ror_epi64(src: __m256i, k: __mmask8, a: __m2 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_ror_epi64(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_ror_epi64(k: __mmask8, a: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_maskz_rorv_epi64(k, a, _mm256_set1_epi64x(IMM8 as i64)) } @@ -19435,7 +19979,8 @@ pub fn _mm256_maskz_ror_epi64(k: __mmask8, a: __m256i) -> __m25 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(1)] -pub fn _mm_ror_epi64(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_ror_epi64(a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); _mm_rorv_epi64(a, _mm_set1_epi64x(IMM8 as i64)) } @@ -19448,7 +19993,8 @@ pub fn _mm_ror_epi64(a: __m128i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(3)] -pub fn _mm_mask_ror_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_ror_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); _mm_mask_rorv_epi64(src, k, a, _mm_set1_epi64x(IMM8 as i64)) } @@ -19461,7 +20007,8 @@ pub fn _mm_mask_ror_epi64(src: __m128i, k: __mmask8, a: __m128i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(2)] -pub fn _mm_maskz_ror_epi64(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_ror_epi64(k: __mmask8, a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); _mm_maskz_rorv_epi64(k, a, _mm_set1_epi64x(IMM8 as i64)) } @@ -19474,7 +20021,8 @@ pub fn _mm_maskz_ror_epi64(k: __mmask8, a: __m128i) -> __m128i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_slli_epi32(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_slli_epi32(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 32 { @@ -19493,7 +20041,12 @@ pub fn _mm512_slli_epi32(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", 
since = "1.89")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_slli_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_slli_epi32( + src: __m512i, + k: __mmask16, + a: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = if IMM8 >= 32 { @@ -19513,7 +20066,8 @@ pub fn _mm512_mask_slli_epi32(src: __m512i, k: __mmask16, a: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_slli_epi32(k: __mmask16, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_slli_epi32(k: __mmask16, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 32 { @@ -19533,7 +20087,12 @@ pub fn _mm512_maskz_slli_epi32(k: __mmask16, a: __m512i) -> __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_slli_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_slli_epi32( + src: __m256i, + k: __mmask8, + a: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = if IMM8 >= 32 { @@ -19553,7 +20112,8 @@ pub fn _mm256_mask_slli_epi32(src: __m256i, k: __mmask8, a: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_slli_epi32(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_slli_epi32(k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 32 { @@ -19573,7 +20133,12 @@ pub fn _mm256_maskz_slli_epi32(k: __mmask8, a: __m256i) -> __m2 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm_mask_slli_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_slli_epi32( + src: __m128i, + k: __mmask8, + a: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = if IMM8 >= 32 { @@ -19593,7 +20158,8 @@ pub fn _mm_mask_slli_epi32(src: __m128i, k: __mmask8, a: __m128 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm_maskz_slli_epi32(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_slli_epi32(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 32 { @@ -19613,7 +20179,8 @@ pub fn _mm_maskz_slli_epi32(k: __mmask8, a: __m128i) -> __m128i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_srli_epi32(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_srli_epi32(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 32 { @@ -19632,7 +20199,12 
@@ pub fn _mm512_srli_epi32(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_srli_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_srli_epi32( + src: __m512i, + k: __mmask16, + a: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = if IMM8 >= 32 { @@ -19652,7 +20224,8 @@ pub fn _mm512_mask_srli_epi32(src: __m512i, k: __mmask16, a: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_srli_epi32(k: __mmask16, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_srli_epi32(k: __mmask16, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 32 { @@ -19672,7 +20245,12 @@ pub fn _mm512_maskz_srli_epi32(k: __mmask16, a: __m512i) -> __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_srli_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_srli_epi32( + src: __m256i, + k: __mmask8, + a: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = if IMM8 >= 32 { @@ -19692,7 +20270,8 @@ pub fn _mm256_mask_srli_epi32(src: __m256i, k: __mmask8, a: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_srli_epi32(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_srli_epi32(k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 32 { @@ -19712,7 +20291,12 @@ pub fn _mm256_maskz_srli_epi32(k: __mmask8, a: __m256i) -> __m2 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm_mask_srli_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_srli_epi32( + src: __m128i, + k: __mmask8, + a: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = if IMM8 >= 32 { @@ -19732,7 +20316,8 @@ pub fn _mm_mask_srli_epi32(src: __m128i, k: __mmask8, a: __m128 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm_maskz_srli_epi32(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_srli_epi32(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 32 { @@ -19752,7 +20337,8 @@ pub fn _mm_maskz_srli_epi32(k: __mmask8, a: __m128i) -> __m128i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_slli_epi64(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_slli_epi64(a: __m512i) -> 
__m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 64 { @@ -19771,7 +20357,12 @@ pub fn _mm512_slli_epi64(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_slli_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_slli_epi64( + src: __m512i, + k: __mmask8, + a: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = if IMM8 >= 64 { @@ -19791,7 +20382,8 @@ pub fn _mm512_mask_slli_epi64(src: __m512i, k: __mmask8, a: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_slli_epi64(k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_slli_epi64(k: __mmask8, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 64 { @@ -19811,7 +20403,12 @@ pub fn _mm512_maskz_slli_epi64(k: __mmask8, a: __m512i) -> __m5 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_slli_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_slli_epi64( + src: __m256i, + k: __mmask8, + a: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = if IMM8 >= 64 { @@ -19831,7 +20428,8 @@ pub fn _mm256_mask_slli_epi64(src: __m256i, k: __mmask8, a: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_slli_epi64(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_slli_epi64(k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 64 { @@ -19851,7 +20449,12 @@ pub fn _mm256_maskz_slli_epi64(k: __mmask8, a: __m256i) -> __m2 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm_mask_slli_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_slli_epi64( + src: __m128i, + k: __mmask8, + a: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = if IMM8 >= 64 { @@ -19871,7 +20474,8 @@ pub fn _mm_mask_slli_epi64(src: __m128i, k: __mmask8, a: __m128 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm_maskz_slli_epi64(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_slli_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 64 { @@ -19891,7 +20495,8 @@ pub fn _mm_maskz_slli_epi64(k: __mmask8, a: __m128i) -> __m128i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_srli_epi64(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature 
= "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_srli_epi64(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 64 { @@ -19910,7 +20515,12 @@ pub fn _mm512_srli_epi64(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_srli_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_srli_epi64( + src: __m512i, + k: __mmask8, + a: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = if IMM8 >= 64 { @@ -19930,7 +20540,8 @@ pub fn _mm512_mask_srli_epi64(src: __m512i, k: __mmask8, a: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_srli_epi64(k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_srli_epi64(k: __mmask8, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 64 { @@ -19950,7 +20561,12 @@ pub fn _mm512_maskz_srli_epi64(k: __mmask8, a: __m512i) -> __m5 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_srli_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_srli_epi64( + src: __m256i, + k: __mmask8, + a: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = if IMM8 >= 64 { @@ -19970,7 +20586,8 @@ pub fn _mm256_mask_srli_epi64(src: __m256i, k: __mmask8, a: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_srli_epi64(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_srli_epi64(k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 64 { @@ -19990,7 +20607,12 @@ pub fn _mm256_maskz_srli_epi64(k: __mmask8, a: __m256i) -> __m2 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm_mask_srli_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_srli_epi64( + src: __m128i, + k: __mmask8, + a: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = if IMM8 >= 64 { @@ -20010,7 +20632,8 @@ pub fn _mm_mask_srli_epi64(src: __m128i, k: __mmask8, a: __m128 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm_maskz_srli_epi64(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_srli_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); if IMM8 >= 64 { @@ -20622,7 +21245,8 @@ pub fn _mm_maskz_sra_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] 
#[rustc_legacy_const_generics(1)] -pub fn _mm512_srai_epi32(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_srai_epi32(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); transmute(simd_shr(a.as_i32x16(), i32x16::splat(IMM8.min(31) as i32))) @@ -20637,7 +21261,12 @@ pub fn _mm512_srai_epi32(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_srai_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_srai_epi32( + src: __m512i, + k: __mmask16, + a: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = simd_shr(a.as_i32x16(), i32x16::splat(IMM8.min(31) as i32)); @@ -20653,7 +21282,8 @@ pub fn _mm512_mask_srai_epi32(src: __m512i, k: __mmask16, a: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_srai_epi32(k: __mmask16, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_srai_epi32(k: __mmask16, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = simd_shr(a.as_i32x16(), i32x16::splat(IMM8.min(31) as i32)); @@ -20669,7 +21299,12 @@ pub fn _mm512_maskz_srai_epi32(k: __mmask16, a: __m512i) -> __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_srai_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_srai_epi32( + src: __m256i, + k: __mmask8, + a: __m256i, +) -> __m256i { unsafe { let r = simd_shr(a.as_i32x8(), i32x8::splat(IMM8.min(31) as i32)); transmute(simd_select_bitmask(k, r, src.as_i32x8())) @@ -20684,7 +21319,8 @@ pub fn _mm256_mask_srai_epi32(src: __m256i, k: __mmask8, a: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_srai_epi32(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_srai_epi32(k: __mmask8, a: __m256i) -> __m256i { unsafe { let r = simd_shr(a.as_i32x8(), i32x8::splat(IMM8.min(31) as i32)); transmute(simd_select_bitmask(k, r, i32x8::ZERO)) @@ -20699,7 +21335,12 @@ pub fn _mm256_maskz_srai_epi32(k: __mmask8, a: __m256i) -> __m2 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm_mask_srai_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_srai_epi32( + src: __m128i, + k: __mmask8, + a: __m128i, +) -> __m128i { unsafe { let r = simd_shr(a.as_i32x4(), i32x4::splat(IMM8.min(31) as i32)); transmute(simd_select_bitmask(k, r, src.as_i32x4())) @@ -20714,7 +21355,8 @@ pub fn _mm_mask_srai_epi32(src: __m128i, k: __mmask8, a: __m128 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm_maskz_srai_epi32(k: 
__mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_srai_epi32(k: __mmask8, a: __m128i) -> __m128i { unsafe { let r = simd_shr(a.as_i32x4(), i32x4::splat(IMM8.min(31) as i32)); transmute(simd_select_bitmask(k, r, i32x4::ZERO)) @@ -20729,7 +21371,8 @@ pub fn _mm_maskz_srai_epi32(k: __mmask8, a: __m128i) -> __m128i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_srai_epi64(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_srai_epi64(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); transmute(simd_shr(a.as_i64x8(), i64x8::splat(IMM8.min(63) as i64))) @@ -20744,7 +21387,12 @@ pub fn _mm512_srai_epi64(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_srai_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_srai_epi64( + src: __m512i, + k: __mmask8, + a: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = simd_shr(a.as_i64x8(), i64x8::splat(IMM8.min(63) as i64)); @@ -20760,7 +21408,8 @@ pub fn _mm512_mask_srai_epi64(src: __m512i, k: __mmask8, a: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_srai_epi64(k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_srai_epi64(k: __mmask8, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = simd_shr(a.as_i64x8(), i64x8::splat(IMM8.min(63) as i64)); @@ -20776,7 +21425,8 @@ pub fn _mm512_maskz_srai_epi64(k: __mmask8, a: __m512i) -> __m5 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] -pub fn _mm256_srai_epi64(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_srai_epi64(a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); transmute(simd_shr(a.as_i64x4(), i64x4::splat(IMM8.min(63) as i64))) @@ -20791,7 +21441,12 @@ pub fn _mm256_srai_epi64(a: __m256i) -> __m256i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_srai_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_srai_epi64( + src: __m256i, + k: __mmask8, + a: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = simd_shr(a.as_i64x4(), i64x4::splat(IMM8.min(63) as i64)); @@ -20807,7 +21462,8 @@ pub fn _mm256_mask_srai_epi64(src: __m256i, k: __mmask8, a: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_srai_epi64(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_srai_epi64(k: __mmask8, a: __m256i) -> __m256i { unsafe { 
static_assert_uimm_bits!(IMM8, 8); let shf = simd_shr(a.as_i64x4(), i64x4::splat(IMM8.min(63) as i64)); @@ -20823,7 +21479,8 @@ pub fn _mm256_maskz_srai_epi64(k: __mmask8, a: __m256i) -> __m2 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] -pub fn _mm_srai_epi64(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_srai_epi64(a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); transmute(simd_shr(a.as_i64x2(), i64x2::splat(IMM8.min(63) as i64))) @@ -20838,7 +21495,12 @@ pub fn _mm_srai_epi64(a: __m128i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm_mask_srai_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_srai_epi64( + src: __m128i, + k: __mmask8, + a: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = simd_shr(a.as_i64x2(), i64x2::splat(IMM8.min(63) as i64)); @@ -20854,7 +21516,8 @@ pub fn _mm_mask_srai_epi64(src: __m128i, k: __mmask8, a: __m128 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm_maskz_srai_epi64(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_srai_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = simd_shr(a.as_i64x2(), i64x2::splat(IMM8.min(63) as i64)); @@ -20869,7 +21532,8 @@ pub fn _mm_maskz_srai_epi64(k: __mmask8, a: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravd))] -pub fn _mm512_srav_epi32(a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_srav_epi32(a: __m512i, count: __m512i) -> __m512i { unsafe { let count = count.as_u32x16(); let no_overflow: u32x16 = simd_lt(count, u32x16::splat(u32::BITS)); @@ -20885,7 +21549,13 @@ pub fn _mm512_srav_epi32(a: __m512i, count: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravd))] -pub fn _mm512_mask_srav_epi32(src: __m512i, k: __mmask16, a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_srav_epi32( + src: __m512i, + k: __mmask16, + a: __m512i, + count: __m512i, +) -> __m512i { unsafe { let shf = _mm512_srav_epi32(a, count).as_i32x16(); transmute(simd_select_bitmask(k, shf, src.as_i32x16())) @@ -20899,7 +21569,8 @@ pub fn _mm512_mask_srav_epi32(src: __m512i, k: __mmask16, a: __m512i, count: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravd))] -pub fn _mm512_maskz_srav_epi32(k: __mmask16, a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_srav_epi32(k: __mmask16, a: __m512i, count: __m512i) -> __m512i { unsafe { let shf = _mm512_srav_epi32(a, count).as_i32x16(); transmute(simd_select_bitmask(k, shf, i32x16::ZERO)) @@ 
-20913,7 +21584,13 @@ pub fn _mm512_maskz_srav_epi32(k: __mmask16, a: __m512i, count: __m512i) -> __m5 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravd))] -pub fn _mm256_mask_srav_epi32(src: __m256i, k: __mmask8, a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_srav_epi32( + src: __m256i, + k: __mmask8, + a: __m256i, + count: __m256i, +) -> __m256i { unsafe { let shf = _mm256_srav_epi32(a, count).as_i32x8(); transmute(simd_select_bitmask(k, shf, src.as_i32x8())) @@ -20927,7 +21604,8 @@ pub fn _mm256_mask_srav_epi32(src: __m256i, k: __mmask8, a: __m256i, count: __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravd))] -pub fn _mm256_maskz_srav_epi32(k: __mmask8, a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_srav_epi32(k: __mmask8, a: __m256i, count: __m256i) -> __m256i { unsafe { let shf = _mm256_srav_epi32(a, count).as_i32x8(); transmute(simd_select_bitmask(k, shf, i32x8::ZERO)) @@ -20941,7 +21619,8 @@ pub fn _mm256_maskz_srav_epi32(k: __mmask8, a: __m256i, count: __m256i) -> __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravd))] -pub fn _mm_mask_srav_epi32(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_srav_epi32(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { unsafe { let shf = _mm_srav_epi32(a, count).as_i32x4(); transmute(simd_select_bitmask(k, shf, src.as_i32x4())) @@ -20955,7 +21634,8 @@ pub fn _mm_mask_srav_epi32(src: __m128i, k: __mmask8, a: __m128i, count: __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravd))] -pub fn _mm_maskz_srav_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_srav_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { unsafe { let shf = _mm_srav_epi32(a, count).as_i32x4(); transmute(simd_select_bitmask(k, shf, i32x4::ZERO)) @@ -20969,7 +21649,8 @@ pub fn _mm_maskz_srav_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravq))] -pub fn _mm512_srav_epi64(a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_srav_epi64(a: __m512i, count: __m512i) -> __m512i { unsafe { let count = count.as_u64x8(); let no_overflow: u64x8 = simd_lt(count, u64x8::splat(u64::BITS as u64)); @@ -20985,7 +21666,13 @@ pub fn _mm512_srav_epi64(a: __m512i, count: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravq))] -pub fn _mm512_mask_srav_epi64(src: __m512i, k: __mmask8, a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_srav_epi64( + src: __m512i, + k: __mmask8, + a: 
__m512i, + count: __m512i, +) -> __m512i { unsafe { let shf = _mm512_srav_epi64(a, count).as_i64x8(); transmute(simd_select_bitmask(k, shf, src.as_i64x8())) @@ -20999,7 +21686,8 @@ pub fn _mm512_mask_srav_epi64(src: __m512i, k: __mmask8, a: __m512i, count: __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravq))] -pub fn _mm512_maskz_srav_epi64(k: __mmask8, a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_srav_epi64(k: __mmask8, a: __m512i, count: __m512i) -> __m512i { unsafe { let shf = _mm512_srav_epi64(a, count).as_i64x8(); transmute(simd_select_bitmask(k, shf, i64x8::ZERO)) @@ -21013,7 +21701,8 @@ pub fn _mm512_maskz_srav_epi64(k: __mmask8, a: __m512i, count: __m512i) -> __m51 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravq))] -pub fn _mm256_srav_epi64(a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_srav_epi64(a: __m256i, count: __m256i) -> __m256i { unsafe { let count = count.as_u64x4(); let no_overflow: u64x4 = simd_lt(count, u64x4::splat(u64::BITS as u64)); @@ -21029,7 +21718,13 @@ pub fn _mm256_srav_epi64(a: __m256i, count: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravq))] -pub fn _mm256_mask_srav_epi64(src: __m256i, k: __mmask8, a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_srav_epi64( + src: __m256i, + k: __mmask8, + a: __m256i, + count: __m256i, +) -> __m256i { unsafe { let shf = _mm256_srav_epi64(a, count).as_i64x4(); transmute(simd_select_bitmask(k, shf, src.as_i64x4())) @@ -21043,7 +21738,8 @@ pub fn _mm256_mask_srav_epi64(src: __m256i, k: __mmask8, a: __m256i, count: __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravq))] -pub fn _mm256_maskz_srav_epi64(k: __mmask8, a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_srav_epi64(k: __mmask8, a: __m256i, count: __m256i) -> __m256i { unsafe { let shf = _mm256_srav_epi64(a, count).as_i64x4(); transmute(simd_select_bitmask(k, shf, i64x4::ZERO)) @@ -21057,7 +21753,8 @@ pub fn _mm256_maskz_srav_epi64(k: __mmask8, a: __m256i, count: __m256i) -> __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravq))] -pub fn _mm_srav_epi64(a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_srav_epi64(a: __m128i, count: __m128i) -> __m128i { unsafe { let count = count.as_u64x2(); let no_overflow: u64x2 = simd_lt(count, u64x2::splat(u64::BITS as u64)); @@ -21073,7 +21770,8 @@ pub fn _mm_srav_epi64(a: __m128i, count: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravq))] -pub fn _mm_mask_srav_epi64(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue 
= "149298")] +pub const fn _mm_mask_srav_epi64(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { unsafe { let shf = _mm_srav_epi64(a, count).as_i64x2(); transmute(simd_select_bitmask(k, shf, src.as_i64x2())) @@ -21087,7 +21785,8 @@ pub fn _mm_mask_srav_epi64(src: __m128i, k: __mmask8, a: __m128i, count: __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsravq))] -pub fn _mm_maskz_srav_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_srav_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { unsafe { let shf = _mm_srav_epi64(a, count).as_i64x2(); transmute(simd_select_bitmask(k, shf, i64x2::ZERO)) @@ -21101,7 +21800,8 @@ pub fn _mm_maskz_srav_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolvd))] -pub fn _mm512_rolv_epi32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_rolv_epi32(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_funnel_shl( a.as_u32x16(), @@ -21118,7 +21818,8 @@ pub fn _mm512_rolv_epi32(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolvd))] -pub fn _mm512_mask_rolv_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_rolv_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let rol = _mm512_rolv_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, rol, src.as_i32x16())) @@ -21132,7 +21833,8 @@ pub fn _mm512_mask_rolv_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolvd))] -pub fn _mm512_maskz_rolv_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_rolv_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let rol = _mm512_rolv_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, rol, i32x16::ZERO)) @@ -21146,7 +21848,8 @@ pub fn _mm512_maskz_rolv_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolvd))] -pub fn _mm256_rolv_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_rolv_epi32(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_funnel_shl( a.as_u32x8(), @@ -21163,7 +21866,8 @@ pub fn _mm256_rolv_epi32(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolvd))] -pub fn _mm256_mask_rolv_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_rolv_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let 
rol = _mm256_rolv_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, rol, src.as_i32x8())) @@ -21177,7 +21881,8 @@ pub fn _mm256_mask_rolv_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolvd))] -pub fn _mm256_maskz_rolv_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_rolv_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let rol = _mm256_rolv_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, rol, i32x8::ZERO)) @@ -21191,7 +21896,8 @@ pub fn _mm256_maskz_rolv_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolvd))] -pub fn _mm_rolv_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_rolv_epi32(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_funnel_shl( a.as_u32x4(), @@ -21208,7 +21914,8 @@ pub fn _mm_rolv_epi32(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolvd))] -pub fn _mm_mask_rolv_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_rolv_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let rol = _mm_rolv_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, rol, src.as_i32x4())) @@ -21222,7 +21929,8 @@ pub fn _mm_mask_rolv_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolvd))] -pub fn _mm_maskz_rolv_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_rolv_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let rol = _mm_rolv_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, rol, i32x4::ZERO)) @@ -21236,7 +21944,8 @@ pub fn _mm_maskz_rolv_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprorvd))] -pub fn _mm512_rorv_epi32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_rorv_epi32(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_funnel_shr( a.as_u32x16(), @@ -21253,7 +21962,8 @@ pub fn _mm512_rorv_epi32(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprorvd))] -pub fn _mm512_mask_rorv_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_rorv_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let ror = _mm512_rorv_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, ror, src.as_i32x16())) @@ -21267,7 +21977,8 @@ pub fn _mm512_mask_rorv_epi32(src: 
__m512i, k: __mmask16, a: __m512i, b: __m512i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprorvd))] -pub fn _mm512_maskz_rorv_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_rorv_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let ror = _mm512_rorv_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, ror, i32x16::ZERO)) @@ -21281,7 +21992,8 @@ pub fn _mm512_maskz_rorv_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprorvd))] -pub fn _mm256_rorv_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_rorv_epi32(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_funnel_shr( a.as_u32x8(), @@ -21298,7 +22010,8 @@ pub fn _mm256_rorv_epi32(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprorvd))] -pub fn _mm256_mask_rorv_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_rorv_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let ror = _mm256_rorv_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, ror, src.as_i32x8())) @@ -21312,7 +22025,8 @@ pub fn _mm256_mask_rorv_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprorvd))] -pub fn _mm256_maskz_rorv_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_rorv_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let ror = _mm256_rorv_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, ror, i32x8::ZERO)) @@ -21326,7 +22040,8 @@ pub fn _mm256_maskz_rorv_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprorvd))] -pub fn _mm_rorv_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_rorv_epi32(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_funnel_shr( a.as_u32x4(), @@ -21343,7 +22058,8 @@ pub fn _mm_rorv_epi32(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprorvd))] -pub fn _mm_mask_rorv_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_rorv_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let ror = _mm_rorv_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, ror, src.as_i32x4())) @@ -21357,7 +22073,8 @@ pub fn _mm_mask_rorv_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", 
since = "1.89")] #[cfg_attr(test, assert_instr(vprorvd))] -pub fn _mm_maskz_rorv_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_rorv_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let ror = _mm_rorv_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, ror, i32x4::ZERO)) @@ -21371,7 +22088,8 @@ pub fn _mm_maskz_rorv_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolvq))] -pub fn _mm512_rolv_epi64(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_rolv_epi64(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_funnel_shl( a.as_u64x8(), @@ -21388,7 +22106,8 @@ pub fn _mm512_rolv_epi64(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolvq))] -pub fn _mm512_mask_rolv_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_rolv_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let rol = _mm512_rolv_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, rol, src.as_i64x8())) @@ -21402,7 +22121,8 @@ pub fn _mm512_mask_rolv_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolvq))] -pub fn _mm512_maskz_rolv_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_rolv_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let rol = _mm512_rolv_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, rol, i64x8::ZERO)) @@ -21416,7 +22136,8 @@ pub fn _mm512_maskz_rolv_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolvq))] -pub fn _mm256_rolv_epi64(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_rolv_epi64(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_funnel_shl( a.as_u64x4(), @@ -21433,7 +22154,8 @@ pub fn _mm256_rolv_epi64(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolvq))] -pub fn _mm256_mask_rolv_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_rolv_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let rol = _mm256_rolv_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, rol, src.as_i64x4())) @@ -21447,7 +22169,8 @@ pub fn _mm256_mask_rolv_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolvq))] -pub fn _mm256_maskz_rolv_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_rolv_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let rol = _mm256_rolv_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, rol, i64x4::ZERO)) @@ -21461,7 +22184,8 @@ pub fn _mm256_maskz_rolv_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolvq))] -pub fn _mm_rolv_epi64(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_rolv_epi64(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_funnel_shl( a.as_u64x2(), @@ -21478,7 +22202,8 @@ pub fn _mm_rolv_epi64(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolvq))] -pub fn _mm_mask_rolv_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_rolv_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let rol = _mm_rolv_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, rol, src.as_i64x2())) @@ -21492,7 +22217,8 @@ pub fn _mm_mask_rolv_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprolvq))] -pub fn _mm_maskz_rolv_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_rolv_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let rol = _mm_rolv_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, rol, i64x2::ZERO)) @@ -21506,7 +22232,8 @@ pub fn _mm_maskz_rolv_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprorvq))] -pub fn _mm512_rorv_epi64(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_rorv_epi64(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_funnel_shr( a.as_u64x8(), @@ -21523,7 +22250,8 @@ pub fn _mm512_rorv_epi64(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprorvq))] -pub fn _mm512_mask_rorv_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_rorv_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let ror = _mm512_rorv_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, ror, src.as_i64x8())) @@ -21537,7 +22265,8 @@ pub fn _mm512_mask_rorv_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprorvq))] -pub fn _mm512_maskz_rorv_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_rorv_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe 
{ let ror = _mm512_rorv_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, ror, i64x8::ZERO)) @@ -21551,7 +22280,8 @@ pub fn _mm512_maskz_rorv_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprorvq))] -pub fn _mm256_rorv_epi64(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_rorv_epi64(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_funnel_shr( a.as_u64x4(), @@ -21568,7 +22298,8 @@ pub fn _mm256_rorv_epi64(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprorvq))] -pub fn _mm256_mask_rorv_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_rorv_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let ror = _mm256_rorv_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, ror, src.as_i64x4())) @@ -21582,7 +22313,8 @@ pub fn _mm256_mask_rorv_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprorvq))] -pub fn _mm256_maskz_rorv_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_rorv_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let ror = _mm256_rorv_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, ror, i64x4::ZERO)) @@ -21596,7 +22328,8 @@ pub fn _mm256_maskz_rorv_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprorvq))] -pub fn _mm_rorv_epi64(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_rorv_epi64(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_funnel_shr( a.as_u64x2(), @@ -21613,7 +22346,8 @@ pub fn _mm_rorv_epi64(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprorvq))] -pub fn _mm_mask_rorv_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_rorv_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let ror = _mm_rorv_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, ror, src.as_i64x2())) @@ -21627,7 +22361,8 @@ pub fn _mm_mask_rorv_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vprorvq))] -pub fn _mm_maskz_rorv_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_rorv_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let ror = _mm_rorv_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, ror, i64x2::ZERO)) @@ -21641,7 +22376,8 @@ pub fn 
_mm_maskz_rorv_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvd))] -pub fn _mm512_sllv_epi32(a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_sllv_epi32(a: __m512i, count: __m512i) -> __m512i { unsafe { let count = count.as_u32x16(); let no_overflow: u32x16 = simd_lt(count, u32x16::splat(u32::BITS)); @@ -21657,7 +22393,13 @@ pub fn _mm512_sllv_epi32(a: __m512i, count: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvd))] -pub fn _mm512_mask_sllv_epi32(src: __m512i, k: __mmask16, a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_sllv_epi32( + src: __m512i, + k: __mmask16, + a: __m512i, + count: __m512i, +) -> __m512i { unsafe { let shf = _mm512_sllv_epi32(a, count).as_i32x16(); transmute(simd_select_bitmask(k, shf, src.as_i32x16())) @@ -21671,7 +22413,8 @@ pub fn _mm512_mask_sllv_epi32(src: __m512i, k: __mmask16, a: __m512i, count: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvd))] -pub fn _mm512_maskz_sllv_epi32(k: __mmask16, a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_sllv_epi32(k: __mmask16, a: __m512i, count: __m512i) -> __m512i { unsafe { let shf = _mm512_sllv_epi32(a, count).as_i32x16(); transmute(simd_select_bitmask(k, shf, i32x16::ZERO)) @@ -21685,7 +22428,13 @@ pub fn _mm512_maskz_sllv_epi32(k: __mmask16, a: __m512i, count: __m512i) -> __m5 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvd))] -pub fn _mm256_mask_sllv_epi32(src: __m256i, k: __mmask8, a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_sllv_epi32( + src: __m256i, + k: __mmask8, + a: __m256i, + count: __m256i, +) -> __m256i { unsafe { let shf = _mm256_sllv_epi32(a, count).as_i32x8(); transmute(simd_select_bitmask(k, shf, src.as_i32x8())) @@ -21699,7 +22448,8 @@ pub fn _mm256_mask_sllv_epi32(src: __m256i, k: __mmask8, a: __m256i, count: __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvd))] -pub fn _mm256_maskz_sllv_epi32(k: __mmask8, a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_sllv_epi32(k: __mmask8, a: __m256i, count: __m256i) -> __m256i { unsafe { let shf = _mm256_sllv_epi32(a, count).as_i32x8(); transmute(simd_select_bitmask(k, shf, i32x8::ZERO)) @@ -21713,7 +22463,8 @@ pub fn _mm256_maskz_sllv_epi32(k: __mmask8, a: __m256i, count: __m256i) -> __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvd))] -pub fn _mm_mask_sllv_epi32(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_sllv_epi32(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) 
-> __m128i { unsafe { let shf = _mm_sllv_epi32(a, count).as_i32x4(); transmute(simd_select_bitmask(k, shf, src.as_i32x4())) @@ -21727,7 +22478,8 @@ pub fn _mm_mask_sllv_epi32(src: __m128i, k: __mmask8, a: __m128i, count: __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvd))] -pub fn _mm_maskz_sllv_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_sllv_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { unsafe { let shf = _mm_sllv_epi32(a, count).as_i32x4(); transmute(simd_select_bitmask(k, shf, i32x4::ZERO)) @@ -21741,7 +22493,8 @@ pub fn _mm_maskz_sllv_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvd))] -pub fn _mm512_srlv_epi32(a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_srlv_epi32(a: __m512i, count: __m512i) -> __m512i { unsafe { let count = count.as_u32x16(); let no_overflow: u32x16 = simd_lt(count, u32x16::splat(u32::BITS)); @@ -21757,7 +22510,13 @@ pub fn _mm512_srlv_epi32(a: __m512i, count: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvd))] -pub fn _mm512_mask_srlv_epi32(src: __m512i, k: __mmask16, a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_srlv_epi32( + src: __m512i, + k: __mmask16, + a: __m512i, + count: __m512i, +) -> __m512i { unsafe { let shf = _mm512_srlv_epi32(a, count).as_i32x16(); transmute(simd_select_bitmask(k, shf, src.as_i32x16())) @@ -21771,7 +22530,8 @@ pub fn _mm512_mask_srlv_epi32(src: __m512i, k: __mmask16, a: __m512i, count: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvd))] -pub fn _mm512_maskz_srlv_epi32(k: __mmask16, a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_srlv_epi32(k: __mmask16, a: __m512i, count: __m512i) -> __m512i { unsafe { let shf = _mm512_srlv_epi32(a, count).as_i32x16(); transmute(simd_select_bitmask(k, shf, i32x16::ZERO)) @@ -21785,7 +22545,13 @@ pub fn _mm512_maskz_srlv_epi32(k: __mmask16, a: __m512i, count: __m512i) -> __m5 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvd))] -pub fn _mm256_mask_srlv_epi32(src: __m256i, k: __mmask8, a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_srlv_epi32( + src: __m256i, + k: __mmask8, + a: __m256i, + count: __m256i, +) -> __m256i { unsafe { let shf = _mm256_srlv_epi32(a, count).as_i32x8(); transmute(simd_select_bitmask(k, shf, src.as_i32x8())) @@ -21799,7 +22565,8 @@ pub fn _mm256_mask_srlv_epi32(src: __m256i, k: __mmask8, a: __m256i, count: __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvd))] -pub fn _mm256_maskz_srlv_epi32(k: __mmask8, a: __m256i, count: __m256i) -> __m256i { 
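Editorial sketch (not part of the patch): the hunks around here const-qualify the variable-shift intrinsics (`_mm512_sllv_epi32`, `_mm512_srlv_epi32` and their mask/maskz forms). A minimal compile-time exercise of one of them, following the `assert_eq_const`-style test pattern this patch introduces for abm.rs, could look roughly like the code below; the test name is hypothetical, and it assumes `_mm512_set1_epi32` and the `assert_eq_m512i` test helper are likewise const-qualified elsewhere in this series.

#[simd_test(enable = "avx512f")]
const unsafe fn test_mm512_sllv_epi32_const_sketch() {
    // Each lane of `a` is shifted left by the matching lane of `count`:
    // 1 << 3 == 8 in every lane; counts >= 32 would instead yield 0.
    let a = _mm512_set1_epi32(1);
    let count = _mm512_set1_epi32(3);
    let r = _mm512_sllv_epi32(a, count);
    assert_eq_m512i(r, _mm512_set1_epi32(8));
}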
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_srlv_epi32(k: __mmask8, a: __m256i, count: __m256i) -> __m256i { unsafe { let shf = _mm256_srlv_epi32(a, count).as_i32x8(); transmute(simd_select_bitmask(k, shf, i32x8::ZERO)) @@ -21813,7 +22580,8 @@ pub fn _mm256_maskz_srlv_epi32(k: __mmask8, a: __m256i, count: __m256i) -> __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvd))] -pub fn _mm_mask_srlv_epi32(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_srlv_epi32(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { unsafe { let shf = _mm_srlv_epi32(a, count).as_i32x4(); transmute(simd_select_bitmask(k, shf, src.as_i32x4())) @@ -21827,7 +22595,8 @@ pub fn _mm_mask_srlv_epi32(src: __m128i, k: __mmask8, a: __m128i, count: __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvd))] -pub fn _mm_maskz_srlv_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_srlv_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { unsafe { let shf = _mm_srlv_epi32(a, count).as_i32x4(); transmute(simd_select_bitmask(k, shf, i32x4::ZERO)) @@ -21841,7 +22610,8 @@ pub fn _mm_maskz_srlv_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvq))] -pub fn _mm512_sllv_epi64(a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_sllv_epi64(a: __m512i, count: __m512i) -> __m512i { unsafe { let count = count.as_u64x8(); let no_overflow: u64x8 = simd_lt(count, u64x8::splat(u64::BITS as u64)); @@ -21857,7 +22627,13 @@ pub fn _mm512_sllv_epi64(a: __m512i, count: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvq))] -pub fn _mm512_mask_sllv_epi64(src: __m512i, k: __mmask8, a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_sllv_epi64( + src: __m512i, + k: __mmask8, + a: __m512i, + count: __m512i, +) -> __m512i { unsafe { let shf = _mm512_sllv_epi64(a, count).as_i64x8(); transmute(simd_select_bitmask(k, shf, src.as_i64x8())) @@ -21871,7 +22647,8 @@ pub fn _mm512_mask_sllv_epi64(src: __m512i, k: __mmask8, a: __m512i, count: __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvq))] -pub fn _mm512_maskz_sllv_epi64(k: __mmask8, a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_sllv_epi64(k: __mmask8, a: __m512i, count: __m512i) -> __m512i { unsafe { let shf = _mm512_sllv_epi64(a, count).as_i64x8(); transmute(simd_select_bitmask(k, shf, i64x8::ZERO)) @@ -21885,7 +22662,13 @@ pub fn _mm512_maskz_sllv_epi64(k: __mmask8, a: __m512i, count: __m512i) -> __m51 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, 
assert_instr(vpsllvq))] -pub fn _mm256_mask_sllv_epi64(src: __m256i, k: __mmask8, a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_sllv_epi64( + src: __m256i, + k: __mmask8, + a: __m256i, + count: __m256i, +) -> __m256i { unsafe { let shf = _mm256_sllv_epi64(a, count).as_i64x4(); transmute(simd_select_bitmask(k, shf, src.as_i64x4())) @@ -21899,7 +22682,8 @@ pub fn _mm256_mask_sllv_epi64(src: __m256i, k: __mmask8, a: __m256i, count: __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvq))] -pub fn _mm256_maskz_sllv_epi64(k: __mmask8, a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_sllv_epi64(k: __mmask8, a: __m256i, count: __m256i) -> __m256i { unsafe { let shf = _mm256_sllv_epi64(a, count).as_i64x4(); transmute(simd_select_bitmask(k, shf, i64x4::ZERO)) @@ -21913,7 +22697,8 @@ pub fn _mm256_maskz_sllv_epi64(k: __mmask8, a: __m256i, count: __m256i) -> __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvq))] -pub fn _mm_mask_sllv_epi64(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_sllv_epi64(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { unsafe { let shf = _mm_sllv_epi64(a, count).as_i64x2(); transmute(simd_select_bitmask(k, shf, src.as_i64x2())) @@ -21927,7 +22712,8 @@ pub fn _mm_mask_sllv_epi64(src: __m128i, k: __mmask8, a: __m128i, count: __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsllvq))] -pub fn _mm_maskz_sllv_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_sllv_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { unsafe { let shf = _mm_sllv_epi64(a, count).as_i64x2(); transmute(simd_select_bitmask(k, shf, i64x2::ZERO)) @@ -21941,7 +22727,8 @@ pub fn _mm_maskz_sllv_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvq))] -pub fn _mm512_srlv_epi64(a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_srlv_epi64(a: __m512i, count: __m512i) -> __m512i { unsafe { let count = count.as_u64x8(); let no_overflow: u64x8 = simd_lt(count, u64x8::splat(u64::BITS as u64)); @@ -21957,7 +22744,13 @@ pub fn _mm512_srlv_epi64(a: __m512i, count: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvq))] -pub fn _mm512_mask_srlv_epi64(src: __m512i, k: __mmask8, a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_srlv_epi64( + src: __m512i, + k: __mmask8, + a: __m512i, + count: __m512i, +) -> __m512i { unsafe { let shf = _mm512_srlv_epi64(a, count).as_i64x8(); transmute(simd_select_bitmask(k, shf, src.as_i64x8())) @@ -21971,7 +22764,8 @@ pub fn _mm512_mask_srlv_epi64(src: __m512i, k: 
__mmask8, a: __m512i, count: __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvq))] -pub fn _mm512_maskz_srlv_epi64(k: __mmask8, a: __m512i, count: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_srlv_epi64(k: __mmask8, a: __m512i, count: __m512i) -> __m512i { unsafe { let shf = _mm512_srlv_epi64(a, count).as_i64x8(); transmute(simd_select_bitmask(k, shf, i64x8::ZERO)) @@ -21985,7 +22779,13 @@ pub fn _mm512_maskz_srlv_epi64(k: __mmask8, a: __m512i, count: __m512i) -> __m51 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvq))] -pub fn _mm256_mask_srlv_epi64(src: __m256i, k: __mmask8, a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_srlv_epi64( + src: __m256i, + k: __mmask8, + a: __m256i, + count: __m256i, +) -> __m256i { unsafe { let shf = _mm256_srlv_epi64(a, count).as_i64x4(); transmute(simd_select_bitmask(k, shf, src.as_i64x4())) @@ -21999,7 +22799,8 @@ pub fn _mm256_mask_srlv_epi64(src: __m256i, k: __mmask8, a: __m256i, count: __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvq))] -pub fn _mm256_maskz_srlv_epi64(k: __mmask8, a: __m256i, count: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_srlv_epi64(k: __mmask8, a: __m256i, count: __m256i) -> __m256i { unsafe { let shf = _mm256_srlv_epi64(a, count).as_i64x4(); transmute(simd_select_bitmask(k, shf, i64x4::ZERO)) @@ -22013,7 +22814,8 @@ pub fn _mm256_maskz_srlv_epi64(k: __mmask8, a: __m256i, count: __m256i) -> __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvq))] -pub fn _mm_mask_srlv_epi64(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_srlv_epi64(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { unsafe { let shf = _mm_srlv_epi64(a, count).as_i64x2(); transmute(simd_select_bitmask(k, shf, src.as_i64x2())) @@ -22027,7 +22829,8 @@ pub fn _mm_mask_srlv_epi64(src: __m128i, k: __mmask8, a: __m128i, count: __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpsrlvq))] -pub fn _mm_maskz_srlv_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_srlv_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { unsafe { let shf = _mm_srlv_epi64(a, count).as_i64x2(); transmute(simd_select_bitmask(k, shf, i64x2::ZERO)) @@ -22042,7 +22845,8 @@ pub fn _mm_maskz_srlv_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __m128i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_permute_ps(a: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_permute_ps(a: __m512) -> __m512 { unsafe { static_assert_uimm_bits!(MASK, 8); simd_shuffle!( @@ -22078,7 
+22882,12 @@ pub fn _mm512_permute_ps(a: __m512) -> __m512 { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_permute_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_permute_ps( + src: __m512, + k: __mmask16, + a: __m512, +) -> __m512 { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm512_permute_ps::(a); @@ -22094,7 +22903,8 @@ pub fn _mm512_mask_permute_ps(src: __m512, k: __mmask16, a: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_permute_ps(k: __mmask16, a: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_permute_ps(k: __mmask16, a: __m512) -> __m512 { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm512_permute_ps::(a); @@ -22110,7 +22920,12 @@ pub fn _mm512_maskz_permute_ps(k: __mmask16, a: __m512) -> __m5 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_permute_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_permute_ps( + src: __m256, + k: __mmask8, + a: __m256, +) -> __m256 { unsafe { let r = _mm256_permute_ps::(a); transmute(simd_select_bitmask(k, r.as_f32x8(), src.as_f32x8())) @@ -22125,7 +22940,8 @@ pub fn _mm256_mask_permute_ps(src: __m256, k: __mmask8, a: __m2 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_permute_ps(k: __mmask8, a: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_permute_ps(k: __mmask8, a: __m256) -> __m256 { unsafe { let r = _mm256_permute_ps::(a); transmute(simd_select_bitmask(k, r.as_f32x8(), f32x8::ZERO)) @@ -22140,7 +22956,8 @@ pub fn _mm256_maskz_permute_ps(k: __mmask8, a: __m256) -> __m25 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] #[rustc_legacy_const_generics(3)] -pub fn _mm_mask_permute_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_permute_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { unsafe { let r = _mm_permute_ps::(a); transmute(simd_select_bitmask(k, r.as_f32x4(), src.as_f32x4())) @@ -22155,7 +22972,8 @@ pub fn _mm_mask_permute_ps(src: __m128, k: __mmask8, a: __m128) #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] #[rustc_legacy_const_generics(2)] -pub fn _mm_maskz_permute_ps(k: __mmask8, a: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_permute_ps(k: __mmask8, a: __m128) -> __m128 { unsafe { let r = _mm_permute_ps::(a); transmute(simd_select_bitmask(k, r.as_f32x4(), f32x4::ZERO)) @@ -22170,7 +22988,8 @@ pub fn _mm_maskz_permute_ps(k: __mmask8, a: __m128) -> __m128 { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, 
assert_instr(vshufpd, MASK = 0b11_01_10_01))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_permute_pd(a: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_permute_pd(a: __m512d) -> __m512d { unsafe { static_assert_uimm_bits!(MASK, 8); simd_shuffle!( @@ -22198,7 +23017,12 @@ pub fn _mm512_permute_pd(a: __m512d) -> __m512d { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01_10_01))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_permute_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_permute_pd( + src: __m512d, + k: __mmask8, + a: __m512d, +) -> __m512d { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm512_permute_pd::(a); @@ -22214,7 +23038,8 @@ pub fn _mm512_mask_permute_pd(src: __m512d, k: __mmask8, a: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01_10_01))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_permute_pd(k: __mmask8, a: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_permute_pd(k: __mmask8, a: __m512d) -> __m512d { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm512_permute_pd::(a); @@ -22230,7 +23055,12 @@ pub fn _mm512_maskz_permute_pd(k: __mmask8, a: __m512d) -> __m5 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_permute_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_permute_pd( + src: __m256d, + k: __mmask8, + a: __m256d, +) -> __m256d { unsafe { static_assert_uimm_bits!(MASK, 4); let r = _mm256_permute_pd::(a); @@ -22246,7 +23076,8 @@ pub fn _mm256_mask_permute_pd(src: __m256d, k: __mmask8, a: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_permute_pd(k: __mmask8, a: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_permute_pd(k: __mmask8, a: __m256d) -> __m256d { unsafe { static_assert_uimm_bits!(MASK, 4); let r = _mm256_permute_pd::(a); @@ -22262,7 +23093,12 @@ pub fn _mm256_maskz_permute_pd(k: __mmask8, a: __m256d) -> __m2 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, IMM2 = 0b01))] #[rustc_legacy_const_generics(3)] -pub fn _mm_mask_permute_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_permute_pd( + src: __m128d, + k: __mmask8, + a: __m128d, +) -> __m128d { unsafe { static_assert_uimm_bits!(IMM2, 2); let r = _mm_permute_pd::(a); @@ -22278,7 +23114,8 @@ pub fn _mm_mask_permute_pd(src: __m128d, k: __mmask8, a: __m128 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, IMM2 = 0b01))] #[rustc_legacy_const_generics(2)] -pub fn _mm_maskz_permute_pd(k: __mmask8, a: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_permute_pd(k: __mmask8, a: __m128d) -> __m128d { unsafe { 
static_assert_uimm_bits!(IMM2, 2); let r = _mm_permute_pd::(a); @@ -22294,7 +23131,8 @@ pub fn _mm_maskz_permute_pd(k: __mmask8, a: __m128d) -> __m128d #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq #[rustc_legacy_const_generics(1)] -pub fn _mm512_permutex_epi64(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_permutex_epi64(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(MASK, 8); simd_shuffle!( @@ -22322,7 +23160,8 @@ pub fn _mm512_permutex_epi64(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_permutex_epi64( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_permutex_epi64( src: __m512i, k: __mmask8, a: __m512i, @@ -22342,7 +23181,8 @@ pub fn _mm512_mask_permutex_epi64( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_permutex_epi64(k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_permutex_epi64(k: __mmask8, a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm512_permutex_epi64::(a); @@ -22358,7 +23198,8 @@ pub fn _mm512_maskz_permutex_epi64(k: __mmask8, a: __m512i) -> #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq #[rustc_legacy_const_generics(1)] -pub fn _mm256_permutex_epi64(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_permutex_epi64(a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(MASK, 8); simd_shuffle!( @@ -22382,7 +23223,8 @@ pub fn _mm256_permutex_epi64(a: __m256i) -> __m256i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_permutex_epi64( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_permutex_epi64( src: __m256i, k: __mmask8, a: __m256i, @@ -22402,7 +23244,8 @@ pub fn _mm256_mask_permutex_epi64( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_permutex_epi64(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_permutex_epi64(k: __mmask8, a: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm256_permutex_epi64::(a); @@ -22418,7 +23261,8 @@ pub fn _mm256_maskz_permutex_epi64(k: __mmask8, a: __m256i) -> #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd #[rustc_legacy_const_generics(1)] -pub fn _mm512_permutex_pd(a: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_permutex_pd(a: __m512d) -> __m512d { unsafe { static_assert_uimm_bits!(MASK, 8); simd_shuffle!( @@ 
-22446,7 +23290,12 @@ pub fn _mm512_permutex_pd(a: __m512d) -> __m512d { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_permutex_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_permutex_pd( + src: __m512d, + k: __mmask8, + a: __m512d, +) -> __m512d { unsafe { let r = _mm512_permutex_pd::(a); transmute(simd_select_bitmask(k, r.as_f64x8(), src.as_f64x8())) @@ -22461,7 +23310,8 @@ pub fn _mm512_mask_permutex_pd(src: __m512d, k: __mmask8, a: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_permutex_pd(k: __mmask8, a: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_permutex_pd(k: __mmask8, a: __m512d) -> __m512d { unsafe { let r = _mm512_permutex_pd::(a); transmute(simd_select_bitmask(k, r.as_f64x8(), f64x8::ZERO)) @@ -22476,7 +23326,8 @@ pub fn _mm512_maskz_permutex_pd(k: __mmask8, a: __m512d) -> __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd #[rustc_legacy_const_generics(1)] -pub fn _mm256_permutex_pd(a: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_permutex_pd(a: __m256d) -> __m256d { unsafe { static_assert_uimm_bits!(MASK, 8); simd_shuffle!( @@ -22500,7 +23351,12 @@ pub fn _mm256_permutex_pd(a: __m256d) -> __m256d { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_permutex_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_permutex_pd( + src: __m256d, + k: __mmask8, + a: __m256d, +) -> __m256d { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm256_permutex_pd::(a); @@ -22516,7 +23372,8 @@ pub fn _mm256_mask_permutex_pd(src: __m256d, k: __mmask8, a: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_permutex_pd(k: __mmask8, a: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_permutex_pd(k: __mmask8, a: __m256d) -> __m256d { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm256_permutex_pd::(a); @@ -23786,7 +24643,8 @@ pub fn _mm_mask2_permutex2var_pd(a: __m128d, idx: __m128i, k: __mmask8, b: __m12 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 9))] //should be vpshufd #[rustc_legacy_const_generics(1)] -pub fn _mm512_shuffle_epi32(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shuffle_epi32(a: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(MASK, 8); let r: i32x16 = simd_shuffle!( @@ -23823,7 +24681,8 @@ pub fn _mm512_shuffle_epi32(a: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufd, MASK 
= 9))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_shuffle_epi32( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shuffle_epi32( src: __m512i, k: __mmask16, a: __m512i, @@ -23843,7 +24702,11 @@ pub fn _mm512_mask_shuffle_epi32( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufd, MASK = 9))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_shuffle_epi32(k: __mmask16, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shuffle_epi32( + k: __mmask16, + a: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_epi32::(a); @@ -23859,7 +24722,8 @@ pub fn _mm512_maskz_shuffle_epi32(k: __mmask16, a: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufd, MASK = 9))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_shuffle_epi32( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shuffle_epi32( src: __m256i, k: __mmask8, a: __m256i, @@ -23879,7 +24743,11 @@ pub fn _mm256_mask_shuffle_epi32( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufd, MASK = 9))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_shuffle_epi32(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shuffle_epi32( + k: __mmask8, + a: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_epi32::(a); @@ -23895,7 +24763,8 @@ pub fn _mm256_maskz_shuffle_epi32(k: __mmask8, a: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufd, MASK = 9))] #[rustc_legacy_const_generics(3)] -pub fn _mm_mask_shuffle_epi32( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_shuffle_epi32( src: __m128i, k: __mmask8, a: __m128i, @@ -23915,7 +24784,11 @@ pub fn _mm_mask_shuffle_epi32( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufd, MASK = 9))] #[rustc_legacy_const_generics(2)] -pub fn _mm_maskz_shuffle_epi32(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_shuffle_epi32( + k: __mmask8, + a: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm_shuffle_epi32::(a); @@ -23931,7 +24804,8 @@ pub fn _mm_maskz_shuffle_epi32(k: __mmask8, a: __m128 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 3))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_shuffle_ps(a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shuffle_ps(a: __m512, b: __m512) -> __m512 { unsafe { static_assert_uimm_bits!(MASK, 8); simd_shuffle!( @@ -23967,7 +24841,8 @@ pub fn _mm512_shuffle_ps(a: __m512, b: __m512) -> __m512 { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 3))] #[rustc_legacy_const_generics(4)] -pub fn _mm512_mask_shuffle_ps( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shuffle_ps( src: __m512, k: __mmask16, a: __m512, @@ -23988,7 +24863,12 @@ pub fn _mm512_mask_shuffle_ps( #[stable(feature = 
"stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 3))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_shuffle_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shuffle_ps( + k: __mmask16, + a: __m512, + b: __m512, +) -> __m512 { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_ps::(a, b); @@ -24004,7 +24884,8 @@ pub fn _mm512_maskz_shuffle_ps(k: __mmask16, a: __m512, b: __m5 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 3))] #[rustc_legacy_const_generics(4)] -pub fn _mm256_mask_shuffle_ps( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shuffle_ps( src: __m256, k: __mmask8, a: __m256, @@ -24025,7 +24906,8 @@ pub fn _mm256_mask_shuffle_ps( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 3))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_maskz_shuffle_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shuffle_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_ps::(a, b); @@ -24041,7 +24923,8 @@ pub fn _mm256_maskz_shuffle_ps(k: __mmask8, a: __m256, b: __m25 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 3))] #[rustc_legacy_const_generics(4)] -pub fn _mm_mask_shuffle_ps( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_shuffle_ps( src: __m128, k: __mmask8, a: __m128, @@ -24062,7 +24945,8 @@ pub fn _mm_mask_shuffle_ps( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufps, MASK = 3))] #[rustc_legacy_const_generics(3)] -pub fn _mm_maskz_shuffle_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_shuffle_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm_shuffle_ps::(a, b); @@ -24078,7 +24962,8 @@ pub fn _mm_maskz_shuffle_ps(k: __mmask8, a: __m128, b: __m128) #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 3))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_shuffle_pd(a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shuffle_pd(a: __m512d, b: __m512d) -> __m512d { unsafe { static_assert_uimm_bits!(MASK, 8); simd_shuffle!( @@ -24106,7 +24991,8 @@ pub fn _mm512_shuffle_pd(a: __m512d, b: __m512d) -> __m512d { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 3))] #[rustc_legacy_const_generics(4)] -pub fn _mm512_mask_shuffle_pd( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shuffle_pd( src: __m512d, k: __mmask8, a: __m512d, @@ -24127,7 +25013,12 @@ pub fn _mm512_mask_shuffle_pd( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 3))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_shuffle_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] 
+pub const fn _mm512_maskz_shuffle_pd( + k: __mmask8, + a: __m512d, + b: __m512d, +) -> __m512d { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_pd::(a, b); @@ -24143,7 +25034,8 @@ pub fn _mm512_maskz_shuffle_pd(k: __mmask8, a: __m512d, b: __m5 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 3))] #[rustc_legacy_const_generics(4)] -pub fn _mm256_mask_shuffle_pd( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shuffle_pd( src: __m256d, k: __mmask8, a: __m256d, @@ -24164,7 +25056,12 @@ pub fn _mm256_mask_shuffle_pd( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 3))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_maskz_shuffle_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shuffle_pd( + k: __mmask8, + a: __m256d, + b: __m256d, +) -> __m256d { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_pd::(a, b); @@ -24180,7 +25077,8 @@ pub fn _mm256_maskz_shuffle_pd(k: __mmask8, a: __m256d, b: __m2 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 1))] #[rustc_legacy_const_generics(4)] -pub fn _mm_mask_shuffle_pd( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_shuffle_pd( src: __m128d, k: __mmask8, a: __m128d, @@ -24201,7 +25099,8 @@ pub fn _mm_mask_shuffle_pd( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm_maskz_shuffle_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_shuffle_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm_shuffle_pd::(a, b); @@ -24217,7 +25116,8 @@ pub fn _mm_maskz_shuffle_pd(k: __mmask8, a: __m128d, b: __m128d #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b10_01_01_01))] //should be vshufi32x4 #[rustc_legacy_const_generics(2)] -pub fn _mm512_shuffle_i32x4(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shuffle_i32x4(a: __m512i, b: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(MASK, 8); let a = a.as_i32x16(); @@ -24256,7 +25156,8 @@ pub fn _mm512_shuffle_i32x4(a: __m512i, b: __m512i) -> __m512i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufi32x4, MASK = 0b10_11_01_01))] #[rustc_legacy_const_generics(4)] -pub fn _mm512_mask_shuffle_i32x4( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shuffle_i32x4( src: __m512i, k: __mmask16, a: __m512i, @@ -24277,7 +25178,8 @@ pub fn _mm512_mask_shuffle_i32x4( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufi32x4, MASK = 0b10_11_01_01))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_shuffle_i32x4( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shuffle_i32x4( k: __mmask16, a: __m512i, b: __m512i, @@ -24297,7 +25199,8 @@ pub fn _mm512_maskz_shuffle_i32x4( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] 
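Editorial sketch (not part of the patch): the 128-bit-lane shuffles being const-qualified here take their immediate through a `MASK` const generic (mapped from the legacy immediate operand by `rustc_legacy_const_generics`). A hedged compile-time usage in the same test style might look like the following; the test name is hypothetical, the turbofish shows the const-generic call form, and `_mm512_setr_epi32`/`assert_eq_m512i` are assumed to be const-qualified elsewhere in this series.

#[simd_test(enable = "avx512f")]
const unsafe fn test_mm512_shuffle_i32x4_const_sketch() {
    let a = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
    let b = _mm512_setr_epi32(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32);
    // MASK = 0: both destination lanes taken from `a` use its lane 0,
    // and both lanes taken from `b` use its lane 0.
    let r = _mm512_shuffle_i32x4::<0b00_00_00_00>(a, b);
    let e = _mm512_setr_epi32(1, 2, 3, 4, 1, 2, 3, 4, 17, 18, 19, 20, 17, 18, 19, 20);
    assert_eq_m512i(r, e);
}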
#[cfg_attr(test, assert_instr(vperm, MASK = 0b11))] //should be vshufi32x4 #[rustc_legacy_const_generics(2)] -pub fn _mm256_shuffle_i32x4(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shuffle_i32x4(a: __m256i, b: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(MASK, 8); let a = a.as_i32x8(); @@ -24328,7 +25231,8 @@ pub fn _mm256_shuffle_i32x4(a: __m256i, b: __m256i) -> __m256i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufi32x4, MASK = 0b11))] #[rustc_legacy_const_generics(4)] -pub fn _mm256_mask_shuffle_i32x4( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shuffle_i32x4( src: __m256i, k: __mmask8, a: __m256i, @@ -24349,7 +25253,12 @@ pub fn _mm256_mask_shuffle_i32x4( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufi32x4, MASK = 0b11))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_maskz_shuffle_i32x4(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shuffle_i32x4( + k: __mmask8, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_i32x4::(a, b); @@ -24365,7 +25274,8 @@ pub fn _mm256_maskz_shuffle_i32x4(k: __mmask8, a: __m256i, b: _ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b10_11_11_11))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_shuffle_i64x2(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shuffle_i64x2(a: __m512i, b: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(MASK, 8); let a = a.as_i64x8(); @@ -24396,7 +25306,8 @@ pub fn _mm512_shuffle_i64x2(a: __m512i, b: __m512i) -> __m512i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b10_11_11_11))] #[rustc_legacy_const_generics(4)] -pub fn _mm512_mask_shuffle_i64x2( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shuffle_i64x2( src: __m512i, k: __mmask8, a: __m512i, @@ -24417,7 +25328,12 @@ pub fn _mm512_mask_shuffle_i64x2( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b10_11_11_11))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_shuffle_i64x2(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shuffle_i64x2( + k: __mmask8, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_i64x2::(a, b); @@ -24433,7 +25349,8 @@ pub fn _mm512_maskz_shuffle_i64x2(k: __mmask8, a: __m512i, b: _ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b01))] //should be vshufi64x2 #[rustc_legacy_const_generics(2)] -pub fn _mm256_shuffle_i64x2(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shuffle_i64x2(a: __m256i, b: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(MASK, 8); let a = a.as_i64x4(); @@ -24460,7 +25377,8 @@ pub fn _mm256_shuffle_i64x2(a: __m256i, b: __m256i) -> __m256i #[stable(feature = "stdarch_x86_avx512", since = 
"1.89")] #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b11))] #[rustc_legacy_const_generics(4)] -pub fn _mm256_mask_shuffle_i64x2( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shuffle_i64x2( src: __m256i, k: __mmask8, a: __m256i, @@ -24481,7 +25399,12 @@ pub fn _mm256_mask_shuffle_i64x2( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b11))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_maskz_shuffle_i64x2(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shuffle_i64x2( + k: __mmask8, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_i64x2::(a, b); @@ -24497,7 +25420,8 @@ pub fn _mm256_maskz_shuffle_i64x2(k: __mmask8, a: __m256i, b: _ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b1011))] //should be vshuff32x4, but generate vshuff64x2 #[rustc_legacy_const_generics(2)] -pub fn _mm512_shuffle_f32x4(a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shuffle_f32x4(a: __m512, b: __m512) -> __m512 { unsafe { static_assert_uimm_bits!(MASK, 8); let a = a.as_f32x16(); @@ -24536,7 +25460,8 @@ pub fn _mm512_shuffle_f32x4(a: __m512, b: __m512) -> __m512 { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshuff32x4, MASK = 0b1011))] #[rustc_legacy_const_generics(4)] -pub fn _mm512_mask_shuffle_f32x4( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shuffle_f32x4( src: __m512, k: __mmask16, a: __m512, @@ -24557,7 +25482,12 @@ pub fn _mm512_mask_shuffle_f32x4( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshuff32x4, MASK = 0b1011))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_shuffle_f32x4(k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shuffle_f32x4( + k: __mmask16, + a: __m512, + b: __m512, +) -> __m512 { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_f32x4::(a, b); @@ -24573,7 +25503,8 @@ pub fn _mm512_maskz_shuffle_f32x4(k: __mmask16, a: __m512, b: _ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b01))] //should be vshuff32x4 #[rustc_legacy_const_generics(2)] -pub fn _mm256_shuffle_f32x4(a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shuffle_f32x4(a: __m256, b: __m256) -> __m256 { unsafe { static_assert_uimm_bits!(MASK, 8); let a = a.as_f32x8(); @@ -24604,7 +25535,8 @@ pub fn _mm256_shuffle_f32x4(a: __m256, b: __m256) -> __m256 { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshuff32x4, MASK = 0b11))] #[rustc_legacy_const_generics(4)] -pub fn _mm256_mask_shuffle_f32x4( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shuffle_f32x4( src: __m256, k: __mmask8, a: __m256, @@ -24625,7 +25557,12 @@ pub fn _mm256_mask_shuffle_f32x4( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshuff32x4, MASK = 0b11))] #[rustc_legacy_const_generics(3)] -pub fn 
_mm256_maskz_shuffle_f32x4(k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shuffle_f32x4( + k: __mmask8, + a: __m256, + b: __m256, +) -> __m256 { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_f32x4::(a, b); @@ -24641,7 +25578,8 @@ pub fn _mm256_maskz_shuffle_f32x4(k: __mmask8, a: __m256, b: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b10_11_11_11))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_shuffle_f64x2(a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shuffle_f64x2(a: __m512d, b: __m512d) -> __m512d { unsafe { static_assert_uimm_bits!(MASK, 8); let a = a.as_f64x8(); @@ -24672,7 +25610,8 @@ pub fn _mm512_shuffle_f64x2(a: __m512d, b: __m512d) -> __m512d #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b10_11_11_11))] #[rustc_legacy_const_generics(4)] -pub fn _mm512_mask_shuffle_f64x2( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shuffle_f64x2( src: __m512d, k: __mmask8, a: __m512d, @@ -24693,7 +25632,12 @@ pub fn _mm512_mask_shuffle_f64x2( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b10_11_11_11))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_shuffle_f64x2(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shuffle_f64x2( + k: __mmask8, + a: __m512d, + b: __m512d, +) -> __m512d { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm512_shuffle_f64x2::(a, b); @@ -24709,7 +25653,8 @@ pub fn _mm512_maskz_shuffle_f64x2(k: __mmask8, a: __m512d, b: _ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b01))] //should be vshuff64x2 #[rustc_legacy_const_generics(2)] -pub fn _mm256_shuffle_f64x2(a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shuffle_f64x2(a: __m256d, b: __m256d) -> __m256d { unsafe { static_assert_uimm_bits!(MASK, 8); let a = a.as_f64x4(); @@ -24736,7 +25681,8 @@ pub fn _mm256_shuffle_f64x2(a: __m256d, b: __m256d) -> __m256d #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b11))] #[rustc_legacy_const_generics(4)] -pub fn _mm256_mask_shuffle_f64x2( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shuffle_f64x2( src: __m256d, k: __mmask8, a: __m256d, @@ -24757,7 +25703,12 @@ pub fn _mm256_mask_shuffle_f64x2( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b11))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_maskz_shuffle_f64x2(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shuffle_f64x2( + k: __mmask8, + a: __m256d, + b: __m256d, +) -> __m256d { unsafe { static_assert_uimm_bits!(MASK, 8); let r = _mm256_shuffle_f64x2::(a, b); @@ -24773,7 +25724,8 @@ pub fn _mm256_maskz_shuffle_f64x2(k: __mmask8, a: __m256d, b: _ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vextractf32x4, IMM8 = 
3))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_extractf32x4_ps(a: __m512) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_extractf32x4_ps(a: __m512) -> __m128 { unsafe { static_assert_uimm_bits!(IMM8, 2); match IMM8 & 0x3 { @@ -24793,7 +25745,12 @@ pub fn _mm512_extractf32x4_ps(a: __m512) -> __m128 { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vextractf32x4, IMM8 = 3))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_extractf32x4_ps(src: __m128, k: __mmask8, a: __m512) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_extractf32x4_ps( + src: __m128, + k: __mmask8, + a: __m512, +) -> __m128 { unsafe { static_assert_uimm_bits!(IMM8, 2); let r = _mm512_extractf32x4_ps::(a); @@ -24809,7 +25766,8 @@ pub fn _mm512_mask_extractf32x4_ps(src: __m128, k: __mmask8, a: #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vextractf32x4, IMM8 = 3))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_extractf32x4_ps(k: __mmask8, a: __m512) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_extractf32x4_ps(k: __mmask8, a: __m512) -> __m128 { unsafe { static_assert_uimm_bits!(IMM8, 2); let r = _mm512_extractf32x4_ps::(a); @@ -24828,7 +25786,8 @@ pub fn _mm512_maskz_extractf32x4_ps(k: __mmask8, a: __m512) -> assert_instr(vextract, IMM8 = 1) //should be vextractf32x4 )] #[rustc_legacy_const_generics(1)] -pub fn _mm256_extractf32x4_ps(a: __m256) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_extractf32x4_ps(a: __m256) -> __m128 { unsafe { static_assert_uimm_bits!(IMM8, 1); match IMM8 & 0x1 { @@ -24846,7 +25805,12 @@ pub fn _mm256_extractf32x4_ps(a: __m256) -> __m128 { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vextractf32x4, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_extractf32x4_ps(src: __m128, k: __mmask8, a: __m256) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_extractf32x4_ps( + src: __m128, + k: __mmask8, + a: __m256, +) -> __m128 { unsafe { static_assert_uimm_bits!(IMM8, 1); let r = _mm256_extractf32x4_ps::(a); @@ -24862,7 +25826,8 @@ pub fn _mm256_mask_extractf32x4_ps(src: __m128, k: __mmask8, a: #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vextractf32x4, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_extractf32x4_ps(k: __mmask8, a: __m256) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_extractf32x4_ps(k: __mmask8, a: __m256) -> __m128 { unsafe { static_assert_uimm_bits!(IMM8, 1); let r = _mm256_extractf32x4_ps::(a); @@ -24881,7 +25846,8 @@ pub fn _mm256_maskz_extractf32x4_ps(k: __mmask8, a: __m256) -> assert_instr(vextractf64x4, IMM1 = 1) //should be vextracti64x4 )] #[rustc_legacy_const_generics(1)] -pub fn _mm512_extracti64x4_epi64(a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_extracti64x4_epi64(a: __m512i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM1, 1); match IMM1 { @@ -24899,7 +25865,8 @@ pub fn _mm512_extracti64x4_epi64(a: __m512i) -> __m256i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, 
assert_instr(vextracti64x4, IMM1 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_extracti64x4_epi64( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_extracti64x4_epi64( src: __m256i, k: __mmask8, a: __m512i, @@ -24919,7 +25886,8 @@ pub fn _mm512_mask_extracti64x4_epi64( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vextracti64x4, IMM1 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_extracti64x4_epi64(k: __mmask8, a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_extracti64x4_epi64(k: __mmask8, a: __m512i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM1, 1); let r = _mm512_extracti64x4_epi64::(a); @@ -24935,7 +25903,8 @@ pub fn _mm512_maskz_extracti64x4_epi64(k: __mmask8, a: __m512i) #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vextractf64x4, IMM8 = 1))] #[rustc_legacy_const_generics(1)] -pub fn _mm512_extractf64x4_pd(a: __m512d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_extractf64x4_pd(a: __m512d) -> __m256d { unsafe { static_assert_uimm_bits!(IMM8, 1); match IMM8 & 0x1 { @@ -24953,7 +25922,8 @@ pub fn _mm512_extractf64x4_pd(a: __m512d) -> __m256d { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vextractf64x4, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_extractf64x4_pd( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_extractf64x4_pd( src: __m256d, k: __mmask8, a: __m512d, @@ -24973,7 +25943,8 @@ pub fn _mm512_mask_extractf64x4_pd( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vextractf64x4, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_extractf64x4_pd(k: __mmask8, a: __m512d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_extractf64x4_pd(k: __mmask8, a: __m512d) -> __m256d { unsafe { static_assert_uimm_bits!(IMM8, 1); let r = _mm512_extractf64x4_pd::(a); @@ -24992,7 +25963,8 @@ pub fn _mm512_maskz_extractf64x4_pd(k: __mmask8, a: __m512d) -> assert_instr(vextractf32x4, IMM2 = 3) //should be vextracti32x4 )] #[rustc_legacy_const_generics(1)] -pub fn _mm512_extracti32x4_epi32(a: __m512i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_extracti32x4_epi32(a: __m512i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM2, 2); let a = a.as_i32x16(); @@ -25015,7 +25987,8 @@ pub fn _mm512_extracti32x4_epi32(a: __m512i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vextracti32x4, IMM2 = 3))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_mask_extracti32x4_epi32( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_extracti32x4_epi32( src: __m128i, k: __mmask8, a: __m512i, @@ -25035,7 +26008,8 @@ pub fn _mm512_mask_extracti32x4_epi32( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vextracti32x4, IMM2 = 3))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_maskz_extracti32x4_epi32(k: __mmask8, a: __m512i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_extracti32x4_epi32(k: __mmask8, a: 
__m512i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM2, 2); let r = _mm512_extracti32x4_epi32::(a); @@ -25054,7 +26028,8 @@ pub fn _mm512_maskz_extracti32x4_epi32(k: __mmask8, a: __m512i) assert_instr(vextract, IMM1 = 1) //should be vextracti32x4 )] #[rustc_legacy_const_generics(1)] -pub fn _mm256_extracti32x4_epi32(a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_extracti32x4_epi32(a: __m256i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM1, 1); let a = a.as_i32x8(); @@ -25075,7 +26050,8 @@ pub fn _mm256_extracti32x4_epi32(a: __m256i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vextracti32x4, IMM1 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_mask_extracti32x4_epi32( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_extracti32x4_epi32( src: __m128i, k: __mmask8, a: __m256i, @@ -25095,7 +26071,8 @@ pub fn _mm256_mask_extracti32x4_epi32( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vextracti32x4, IMM1 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_maskz_extracti32x4_epi32(k: __mmask8, a: __m256i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_extracti32x4_epi32(k: __mmask8, a: __m256i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM1, 1); let r = _mm256_extracti32x4_epi32::(a); @@ -25110,7 +26087,8 @@ pub fn _mm256_maskz_extracti32x4_epi32(k: __mmask8, a: __m256i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovsldup))] -pub fn _mm512_moveldup_ps(a: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_moveldup_ps(a: __m512) -> __m512 { unsafe { let r: f32x16 = simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14]); transmute(r) @@ -25124,7 +26102,8 @@ pub fn _mm512_moveldup_ps(a: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovsldup))] -pub fn _mm512_mask_moveldup_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_moveldup_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { unsafe { let mov: f32x16 = simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14]); @@ -25139,7 +26118,8 @@ pub fn _mm512_mask_moveldup_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovsldup))] -pub fn _mm512_maskz_moveldup_ps(k: __mmask16, a: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_moveldup_ps(k: __mmask16, a: __m512) -> __m512 { unsafe { let mov: f32x16 = simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14]); @@ -25154,7 +26134,8 @@ pub fn _mm512_maskz_moveldup_ps(k: __mmask16, a: __m512) -> __m512 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovsldup))] -pub fn _mm256_mask_moveldup_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = 
"149298")] +pub const fn _mm256_mask_moveldup_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { unsafe { let mov = _mm256_moveldup_ps(a); transmute(simd_select_bitmask(k, mov.as_f32x8(), src.as_f32x8())) @@ -25168,7 +26149,8 @@ pub fn _mm256_mask_moveldup_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovsldup))] -pub fn _mm256_maskz_moveldup_ps(k: __mmask8, a: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_moveldup_ps(k: __mmask8, a: __m256) -> __m256 { unsafe { let mov = _mm256_moveldup_ps(a); transmute(simd_select_bitmask(k, mov.as_f32x8(), f32x8::ZERO)) @@ -25182,7 +26164,8 @@ pub fn _mm256_maskz_moveldup_ps(k: __mmask8, a: __m256) -> __m256 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovsldup))] -pub fn _mm_mask_moveldup_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_moveldup_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { unsafe { let mov = _mm_moveldup_ps(a); transmute(simd_select_bitmask(k, mov.as_f32x4(), src.as_f32x4())) @@ -25196,7 +26179,8 @@ pub fn _mm_mask_moveldup_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovsldup))] -pub fn _mm_maskz_moveldup_ps(k: __mmask8, a: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_moveldup_ps(k: __mmask8, a: __m128) -> __m128 { unsafe { let mov = _mm_moveldup_ps(a); transmute(simd_select_bitmask(k, mov.as_f32x4(), f32x4::ZERO)) @@ -25210,7 +26194,8 @@ pub fn _mm_maskz_moveldup_ps(k: __mmask8, a: __m128) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovshdup))] -pub fn _mm512_movehdup_ps(a: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_movehdup_ps(a: __m512) -> __m512 { unsafe { let r: f32x16 = simd_shuffle!(a, a, [1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15]); transmute(r) @@ -25224,7 +26209,8 @@ pub fn _mm512_movehdup_ps(a: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovshdup))] -pub fn _mm512_mask_movehdup_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_movehdup_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { unsafe { let mov: f32x16 = simd_shuffle!(a, a, [1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15]); @@ -25239,7 +26225,8 @@ pub fn _mm512_mask_movehdup_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovshdup))] -pub fn _mm512_maskz_movehdup_ps(k: __mmask16, a: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_movehdup_ps(k: __mmask16, a: __m512) -> __m512 { unsafe { let mov: f32x16 = simd_shuffle!(a, a, [1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 
11, 13, 13, 15, 15]); @@ -25254,7 +26241,8 @@ pub fn _mm512_maskz_movehdup_ps(k: __mmask16, a: __m512) -> __m512 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovshdup))] -pub fn _mm256_mask_movehdup_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_movehdup_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { unsafe { let mov = _mm256_movehdup_ps(a); transmute(simd_select_bitmask(k, mov.as_f32x8(), src.as_f32x8())) @@ -25268,7 +26256,8 @@ pub fn _mm256_mask_movehdup_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovshdup))] -pub fn _mm256_maskz_movehdup_ps(k: __mmask8, a: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_movehdup_ps(k: __mmask8, a: __m256) -> __m256 { unsafe { let mov = _mm256_movehdup_ps(a); transmute(simd_select_bitmask(k, mov.as_f32x8(), f32x8::ZERO)) @@ -25282,7 +26271,8 @@ pub fn _mm256_maskz_movehdup_ps(k: __mmask8, a: __m256) -> __m256 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovshdup))] -pub fn _mm_mask_movehdup_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_movehdup_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { unsafe { let mov = _mm_movehdup_ps(a); transmute(simd_select_bitmask(k, mov.as_f32x4(), src.as_f32x4())) @@ -25296,7 +26286,8 @@ pub fn _mm_mask_movehdup_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovshdup))] -pub fn _mm_maskz_movehdup_ps(k: __mmask8, a: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_movehdup_ps(k: __mmask8, a: __m128) -> __m128 { unsafe { let mov = _mm_movehdup_ps(a); transmute(simd_select_bitmask(k, mov.as_f32x4(), f32x4::ZERO)) @@ -25310,7 +26301,8 @@ pub fn _mm_maskz_movehdup_ps(k: __mmask8, a: __m128) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovddup))] -pub fn _mm512_movedup_pd(a: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_movedup_pd(a: __m512d) -> __m512d { unsafe { let r: f64x8 = simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6]); transmute(r) @@ -25324,7 +26316,8 @@ pub fn _mm512_movedup_pd(a: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovddup))] -pub fn _mm512_mask_movedup_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_movedup_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d { unsafe { let mov: f64x8 = simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6]); transmute(simd_select_bitmask(k, mov, src.as_f64x8())) @@ -25338,7 +26331,8 @@ pub fn _mm512_mask_movedup_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d #[target_feature(enable = "avx512f")] 
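For reference, a scalar sketch (not part of the patch) of the duplicate shuffles made const above: vmovsldup copies each even-indexed f32 into the following odd slot, and vmovshdup copies each odd-indexed f32 into the preceding even slot; vmovddup is the analogous even-element duplication over f64 lanes.

// Scalar model of _mm512_moveldup_ps: shuffle indices [0, 0, 2, 2, ..., 14, 14].
fn moveldup_ps_model(a: [f32; 16]) -> [f32; 16] {
    let mut r = [0.0f32; 16];
    for i in 0..8 {
        r[2 * i] = a[2 * i];     // even element kept in place ...
        r[2 * i + 1] = a[2 * i]; // ... and duplicated into the odd slot
    }
    r
}

// Scalar model of _mm512_movehdup_ps: shuffle indices [1, 1, 3, 3, ..., 15, 15].
fn movehdup_ps_model(a: [f32; 16]) -> [f32; 16] {
    let mut r = [0.0f32; 16];
    for i in 0..8 {
        r[2 * i] = a[2 * i + 1];     // odd element duplicated downward ...
        r[2 * i + 1] = a[2 * i + 1]; // ... and kept in place
    }
    r
}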
#[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovddup))] -pub fn _mm512_maskz_movedup_pd(k: __mmask8, a: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_movedup_pd(k: __mmask8, a: __m512d) -> __m512d { unsafe { let mov: f64x8 = simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6]); transmute(simd_select_bitmask(k, mov, f64x8::ZERO)) @@ -25352,7 +26346,8 @@ pub fn _mm512_maskz_movedup_pd(k: __mmask8, a: __m512d) -> __m512d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovddup))] -pub fn _mm256_mask_movedup_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_movedup_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d { unsafe { let mov = _mm256_movedup_pd(a); transmute(simd_select_bitmask(k, mov.as_f64x4(), src.as_f64x4())) @@ -25366,7 +26361,8 @@ pub fn _mm256_mask_movedup_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovddup))] -pub fn _mm256_maskz_movedup_pd(k: __mmask8, a: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_movedup_pd(k: __mmask8, a: __m256d) -> __m256d { unsafe { let mov = _mm256_movedup_pd(a); transmute(simd_select_bitmask(k, mov.as_f64x4(), f64x4::ZERO)) @@ -25380,7 +26376,8 @@ pub fn _mm256_maskz_movedup_pd(k: __mmask8, a: __m256d) -> __m256d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovddup))] -pub fn _mm_mask_movedup_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_movedup_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d { unsafe { let mov = _mm_movedup_pd(a); transmute(simd_select_bitmask(k, mov.as_f64x2(), src.as_f64x2())) @@ -25394,7 +26391,8 @@ pub fn _mm_mask_movedup_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovddup))] -pub fn _mm_maskz_movedup_pd(k: __mmask8, a: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_movedup_pd(k: __mmask8, a: __m128d) -> __m128d { unsafe { let mov = _mm_movedup_pd(a); transmute(simd_select_bitmask(k, mov.as_f64x2(), f64x2::ZERO)) @@ -25409,7 +26407,8 @@ pub fn _mm_maskz_movedup_pd(k: __mmask8, a: __m128d) -> __m128d { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 2))] //should be vinserti32x4 #[rustc_legacy_const_generics(2)] -pub fn _mm512_inserti32x4(a: __m512i, b: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_inserti32x4(a: __m512i, b: __m128i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 2); let a = a.as_i32x16(); @@ -25452,7 +26451,8 @@ pub fn _mm512_inserti32x4(a: __m512i, b: __m128i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vinserti32x4, IMM8 = 2))] #[rustc_legacy_const_generics(4)] -pub fn 
_mm512_mask_inserti32x4( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_inserti32x4( src: __m512i, k: __mmask16, a: __m512i, @@ -25473,7 +26473,12 @@ pub fn _mm512_mask_inserti32x4( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vinserti32x4, IMM8 = 2))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_inserti32x4(k: __mmask16, a: __m512i, b: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_inserti32x4( + k: __mmask16, + a: __m512i, + b: __m128i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 2); let r = _mm512_inserti32x4::(a, b); @@ -25492,7 +26497,8 @@ pub fn _mm512_maskz_inserti32x4(k: __mmask16, a: __m512i, b: __ assert_instr(vinsert, IMM8 = 1) //should be vinserti32x4 )] #[rustc_legacy_const_generics(2)] -pub fn _mm256_inserti32x4(a: __m256i, b: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_inserti32x4(a: __m256i, b: __m128i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 1); let a = a.as_i32x8(); @@ -25513,7 +26519,8 @@ pub fn _mm256_inserti32x4(a: __m256i, b: __m128i) -> __m256i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vinserti32x4, IMM8 = 1))] #[rustc_legacy_const_generics(4)] -pub fn _mm256_mask_inserti32x4( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_inserti32x4( src: __m256i, k: __mmask8, a: __m256i, @@ -25534,7 +26541,12 @@ pub fn _mm256_mask_inserti32x4( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vinserti32x4, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_maskz_inserti32x4(k: __mmask8, a: __m256i, b: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_inserti32x4( + k: __mmask8, + a: __m256i, + b: __m128i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 1); let r = _mm256_inserti32x4::(a, b); @@ -25550,7 +26562,8 @@ pub fn _mm256_maskz_inserti32x4(k: __mmask8, a: __m256i, b: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vinsertf64x4, IMM8 = 1))] //should be vinserti64x4 #[rustc_legacy_const_generics(2)] -pub fn _mm512_inserti64x4(a: __m512i, b: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_inserti64x4(a: __m512i, b: __m256i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 1); let b = _mm512_castsi256_si512(b); @@ -25569,7 +26582,8 @@ pub fn _mm512_inserti64x4(a: __m512i, b: __m256i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vinserti64x4, IMM8 = 1))] #[rustc_legacy_const_generics(4)] -pub fn _mm512_mask_inserti64x4( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_inserti64x4( src: __m512i, k: __mmask8, a: __m512i, @@ -25590,7 +26604,12 @@ pub fn _mm512_mask_inserti64x4( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vinserti64x4, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_inserti64x4(k: __mmask8, a: __m512i, b: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_inserti64x4( + k: __mmask8, + a: __m512i, + b: __m256i, +) 
-> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 1); let r = _mm512_inserti64x4::(a, b); @@ -25606,7 +26625,8 @@ pub fn _mm512_maskz_inserti64x4(k: __mmask8, a: __m512i, b: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 2))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_insertf32x4(a: __m512, b: __m128) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_insertf32x4(a: __m512, b: __m128) -> __m512 { unsafe { static_assert_uimm_bits!(IMM8, 2); let b = _mm512_castps128_ps512(b); @@ -25647,7 +26667,8 @@ pub fn _mm512_insertf32x4(a: __m512, b: __m128) -> __m512 { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 2))] #[rustc_legacy_const_generics(4)] -pub fn _mm512_mask_insertf32x4( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_insertf32x4( src: __m512, k: __mmask16, a: __m512, @@ -25668,7 +26689,12 @@ pub fn _mm512_mask_insertf32x4( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 2))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_insertf32x4(k: __mmask16, a: __m512, b: __m128) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_insertf32x4( + k: __mmask16, + a: __m512, + b: __m128, +) -> __m512 { unsafe { static_assert_uimm_bits!(IMM8, 2); let r = _mm512_insertf32x4::(a, b); @@ -25687,7 +26713,8 @@ pub fn _mm512_maskz_insertf32x4(k: __mmask16, a: __m512, b: __m assert_instr(vinsert, IMM8 = 1) //should be vinsertf32x4 )] #[rustc_legacy_const_generics(2)] -pub fn _mm256_insertf32x4(a: __m256, b: __m128) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_insertf32x4(a: __m256, b: __m128) -> __m256 { unsafe { static_assert_uimm_bits!(IMM8, 1); let b = _mm256_castps128_ps256(b); @@ -25706,7 +26733,8 @@ pub fn _mm256_insertf32x4(a: __m256, b: __m128) -> __m256 { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 1))] #[rustc_legacy_const_generics(4)] -pub fn _mm256_mask_insertf32x4( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_insertf32x4( src: __m256, k: __mmask8, a: __m256, @@ -25727,7 +26755,12 @@ pub fn _mm256_mask_insertf32x4( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_maskz_insertf32x4(k: __mmask8, a: __m256, b: __m128) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_insertf32x4( + k: __mmask8, + a: __m256, + b: __m128, +) -> __m256 { unsafe { static_assert_uimm_bits!(IMM8, 1); let r = _mm256_insertf32x4::(a, b); @@ -25743,7 +26776,8 @@ pub fn _mm256_maskz_insertf32x4(k: __mmask8, a: __m256, b: __m1 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vinsertf64x4, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_insertf64x4(a: __m512d, b: __m256d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_insertf64x4(a: __m512d, b: __m256d) -> __m512d { unsafe { static_assert_uimm_bits!(IMM8, 1); let b = _mm512_castpd256_pd512(b); @@ -25762,7 +26796,8 @@ pub fn _mm512_insertf64x4(a: 
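A scalar sketch (illustrative only, not part of the patch) of the insert pattern shared by the intrinsics above: the immediate-selected 128-bit (or 256-bit) lane of `a` is overwritten with `b`, and the mask/maskz forms then blend the result with `src` or zero per element.

// Scalar model of _mm512_inserti32x4::<IMM8>(a, b).
fn inserti32x4_model(a: [i32; 16], b: [i32; 4], imm8: usize) -> [i32; 16] {
    let mut r = a;
    let lane = imm8 & 0x3; // two-bit immediate selects which 128-bit lane to replace
    r[lane * 4..lane * 4 + 4].copy_from_slice(&b);
    r // all other lanes are carried over from `a` unchanged
}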
__m512d, b: __m256d) -> __m512d { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vinsertf64x4, IMM8 = 1))] #[rustc_legacy_const_generics(4)] -pub fn _mm512_mask_insertf64x4( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_insertf64x4( src: __m512d, k: __mmask8, a: __m512d, @@ -25783,7 +26818,12 @@ pub fn _mm512_mask_insertf64x4( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vinsertf64x4, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_insertf64x4(k: __mmask8, a: __m512d, b: __m256d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_insertf64x4( + k: __mmask8, + a: __m512d, + b: __m256d, +) -> __m512d { unsafe { static_assert_uimm_bits!(IMM8, 1); let r = _mm512_insertf64x4::(a, b); @@ -25798,7 +26838,8 @@ pub fn _mm512_maskz_insertf64x4(k: __mmask8, a: __m512d, b: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpckhps))] //should be vpunpckhdq -pub fn _mm512_unpackhi_epi32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_unpackhi_epi32(a: __m512i, b: __m512i) -> __m512i { unsafe { let a = a.as_i32x16(); let b = b.as_i32x16(); @@ -25821,7 +26862,13 @@ pub fn _mm512_unpackhi_epi32(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhdq))] -pub fn _mm512_mask_unpackhi_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_unpackhi_epi32( + src: __m512i, + k: __mmask16, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { let unpackhi = _mm512_unpackhi_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, unpackhi, src.as_i32x16())) @@ -25835,7 +26882,8 @@ pub fn _mm512_mask_unpackhi_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhdq))] -pub fn _mm512_maskz_unpackhi_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_unpackhi_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let unpackhi = _mm512_unpackhi_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, unpackhi, i32x16::ZERO)) @@ -25849,7 +26897,13 @@ pub fn _mm512_maskz_unpackhi_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m5 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhdq))] -pub fn _mm256_mask_unpackhi_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_unpackhi_epi32( + src: __m256i, + k: __mmask8, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { let unpackhi = _mm256_unpackhi_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, unpackhi, src.as_i32x8())) @@ -25863,7 +26917,8 @@ pub fn _mm256_mask_unpackhi_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = 
"stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhdq))] -pub fn _mm256_maskz_unpackhi_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_unpackhi_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let unpackhi = _mm256_unpackhi_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, unpackhi, i32x8::ZERO)) @@ -25877,7 +26932,8 @@ pub fn _mm256_maskz_unpackhi_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhdq))] -pub fn _mm_mask_unpackhi_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_unpackhi_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let unpackhi = _mm_unpackhi_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, unpackhi, src.as_i32x4())) @@ -25891,7 +26947,8 @@ pub fn _mm_mask_unpackhi_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhdq))] -pub fn _mm_maskz_unpackhi_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_unpackhi_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let unpackhi = _mm_unpackhi_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, unpackhi, i32x4::ZERO)) @@ -25905,7 +26962,8 @@ pub fn _mm_maskz_unpackhi_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpckhpd))] //should be vpunpckhqdq -pub fn _mm512_unpackhi_epi64(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_unpackhi_epi64(a: __m512i, b: __m512i) -> __m512i { unsafe { simd_shuffle!(a, b, [1, 9, 1 + 2, 9 + 2, 1 + 4, 9 + 4, 1 + 6, 9 + 6]) } } @@ -25916,7 +26974,13 @@ pub fn _mm512_unpackhi_epi64(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhqdq))] -pub fn _mm512_mask_unpackhi_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_unpackhi_epi64( + src: __m512i, + k: __mmask8, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { let unpackhi = _mm512_unpackhi_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, unpackhi, src.as_i64x8())) @@ -25930,7 +26994,8 @@ pub fn _mm512_mask_unpackhi_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhqdq))] -pub fn _mm512_maskz_unpackhi_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_unpackhi_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let unpackhi = _mm512_unpackhi_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, unpackhi, i64x8::ZERO)) @@ 
-25944,7 +27009,13 @@ pub fn _mm512_maskz_unpackhi_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m51 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhqdq))] -pub fn _mm256_mask_unpackhi_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_unpackhi_epi64( + src: __m256i, + k: __mmask8, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { let unpackhi = _mm256_unpackhi_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, unpackhi, src.as_i64x4())) @@ -25958,7 +27029,8 @@ pub fn _mm256_mask_unpackhi_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhqdq))] -pub fn _mm256_maskz_unpackhi_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_unpackhi_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let unpackhi = _mm256_unpackhi_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, unpackhi, i64x4::ZERO)) @@ -25972,7 +27044,8 @@ pub fn _mm256_maskz_unpackhi_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhqdq))] -pub fn _mm_mask_unpackhi_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_unpackhi_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let unpackhi = _mm_unpackhi_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, unpackhi, src.as_i64x2())) @@ -25986,7 +27059,8 @@ pub fn _mm_mask_unpackhi_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckhqdq))] -pub fn _mm_maskz_unpackhi_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_unpackhi_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let unpackhi = _mm_unpackhi_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, unpackhi, i64x2::ZERO)) @@ -26000,7 +27074,8 @@ pub fn _mm_maskz_unpackhi_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpckhps))] -pub fn _mm512_unpackhi_ps(a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_unpackhi_ps(a: __m512, b: __m512) -> __m512 { unsafe { #[rustfmt::skip] simd_shuffle!( @@ -26020,7 +27095,8 @@ pub fn _mm512_unpackhi_ps(a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpckhps))] -pub fn _mm512_mask_unpackhi_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_unpackhi_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let 
unpackhi = _mm512_unpackhi_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, unpackhi, src.as_f32x16())) @@ -26034,7 +27110,8 @@ pub fn _mm512_mask_unpackhi_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpckhps))] -pub fn _mm512_maskz_unpackhi_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_unpackhi_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let unpackhi = _mm512_unpackhi_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, unpackhi, f32x16::ZERO)) @@ -26048,7 +27125,8 @@ pub fn _mm512_maskz_unpackhi_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpckhps))] -pub fn _mm256_mask_unpackhi_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_unpackhi_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let unpackhi = _mm256_unpackhi_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, unpackhi, src.as_f32x8())) @@ -26062,7 +27140,8 @@ pub fn _mm256_mask_unpackhi_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) - #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpckhps))] -pub fn _mm256_maskz_unpackhi_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_unpackhi_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let unpackhi = _mm256_unpackhi_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, unpackhi, f32x8::ZERO)) @@ -26076,7 +27155,8 @@ pub fn _mm256_maskz_unpackhi_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpckhps))] -pub fn _mm_mask_unpackhi_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_unpackhi_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let unpackhi = _mm_unpackhi_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, unpackhi, src.as_f32x4())) @@ -26090,7 +27170,8 @@ pub fn _mm_mask_unpackhi_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpckhps))] -pub fn _mm_maskz_unpackhi_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_unpackhi_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let unpackhi = _mm_unpackhi_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, unpackhi, f32x4::ZERO)) @@ -26104,7 +27185,8 @@ pub fn _mm_maskz_unpackhi_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpckhpd))] -pub fn _mm512_unpackhi_pd(a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", 
issue = "149298")] +pub const fn _mm512_unpackhi_pd(a: __m512d, b: __m512d) -> __m512d { unsafe { simd_shuffle!(a, b, [1, 9, 1 + 2, 9 + 2, 1 + 4, 9 + 4, 1 + 6, 9 + 6]) } } @@ -26115,7 +27197,8 @@ pub fn _mm512_unpackhi_pd(a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpckhpd))] -pub fn _mm512_mask_unpackhi_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_unpackhi_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let unpackhi = _mm512_unpackhi_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, unpackhi, src.as_f64x8())) @@ -26129,7 +27212,8 @@ pub fn _mm512_mask_unpackhi_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpckhpd))] -pub fn _mm512_maskz_unpackhi_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_unpackhi_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let unpackhi = _mm512_unpackhi_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, unpackhi, f64x8::ZERO)) @@ -26143,7 +27227,8 @@ pub fn _mm512_maskz_unpackhi_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpckhpd))] -pub fn _mm256_mask_unpackhi_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_unpackhi_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let unpackhi = _mm256_unpackhi_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, unpackhi, src.as_f64x4())) @@ -26157,7 +27242,8 @@ pub fn _mm256_mask_unpackhi_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpckhpd))] -pub fn _mm256_maskz_unpackhi_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_unpackhi_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let unpackhi = _mm256_unpackhi_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, unpackhi, f64x4::ZERO)) @@ -26171,7 +27257,8 @@ pub fn _mm256_maskz_unpackhi_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpckhpd))] -pub fn _mm_mask_unpackhi_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_unpackhi_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let unpackhi = _mm_unpackhi_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, unpackhi, src.as_f64x2())) @@ -26185,7 +27272,8 @@ pub fn _mm_mask_unpackhi_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) - #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpckhpd))] -pub 
fn _mm_maskz_unpackhi_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_unpackhi_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let unpackhi = _mm_unpackhi_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, unpackhi, f64x2::ZERO)) @@ -26199,7 +27287,8 @@ pub fn _mm_maskz_unpackhi_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpcklps))] //should be vpunpckldq -pub fn _mm512_unpacklo_epi32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_unpacklo_epi32(a: __m512i, b: __m512i) -> __m512i { unsafe { let a = a.as_i32x16(); let b = b.as_i32x16(); @@ -26222,7 +27311,13 @@ pub fn _mm512_unpacklo_epi32(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckldq))] -pub fn _mm512_mask_unpacklo_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_unpacklo_epi32( + src: __m512i, + k: __mmask16, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { let unpacklo = _mm512_unpacklo_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, unpacklo, src.as_i32x16())) @@ -26236,7 +27331,8 @@ pub fn _mm512_mask_unpacklo_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckldq))] -pub fn _mm512_maskz_unpacklo_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_unpacklo_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let unpacklo = _mm512_unpacklo_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, unpacklo, i32x16::ZERO)) @@ -26250,7 +27346,13 @@ pub fn _mm512_maskz_unpacklo_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m5 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckldq))] -pub fn _mm256_mask_unpacklo_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_unpacklo_epi32( + src: __m256i, + k: __mmask8, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { let unpacklo = _mm256_unpacklo_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, unpacklo, src.as_i32x8())) @@ -26264,7 +27366,8 @@ pub fn _mm256_mask_unpacklo_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckldq))] -pub fn _mm256_maskz_unpacklo_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_unpacklo_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let unpacklo = _mm256_unpacklo_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, unpacklo, i32x8::ZERO)) @@ -26278,7 +27381,8 @@ pub fn _mm256_maskz_unpacklo_epi32(k: __mmask8, a: __m256i, b: __m256i) -> 
__m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckldq))] -pub fn _mm_mask_unpacklo_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_unpacklo_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let unpacklo = _mm_unpacklo_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, unpacklo, src.as_i32x4())) @@ -26292,7 +27396,8 @@ pub fn _mm_mask_unpacklo_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpckldq))] -pub fn _mm_maskz_unpacklo_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_unpacklo_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let unpacklo = _mm_unpacklo_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, unpacklo, i32x4::ZERO)) @@ -26306,7 +27411,8 @@ pub fn _mm_maskz_unpacklo_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpcklpd))] //should be vpunpcklqdq -pub fn _mm512_unpacklo_epi64(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_unpacklo_epi64(a: __m512i, b: __m512i) -> __m512i { unsafe { simd_shuffle!(a, b, [0, 8, 0 + 2, 8 + 2, 0 + 4, 8 + 4, 0 + 6, 8 + 6]) } } @@ -26317,7 +27423,13 @@ pub fn _mm512_unpacklo_epi64(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklqdq))] -pub fn _mm512_mask_unpacklo_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_unpacklo_epi64( + src: __m512i, + k: __mmask8, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { let unpacklo = _mm512_unpacklo_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, unpacklo, src.as_i64x8())) @@ -26331,7 +27443,8 @@ pub fn _mm512_mask_unpacklo_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklqdq))] -pub fn _mm512_maskz_unpacklo_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_unpacklo_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let unpacklo = _mm512_unpacklo_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, unpacklo, i64x8::ZERO)) @@ -26345,7 +27458,13 @@ pub fn _mm512_maskz_unpacklo_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m51 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklqdq))] -pub fn _mm256_mask_unpacklo_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_unpacklo_epi64( + src: __m256i, + k: __mmask8, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { let 
unpacklo = _mm256_unpacklo_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, unpacklo, src.as_i64x4())) @@ -26359,7 +27478,8 @@ pub fn _mm256_mask_unpacklo_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklqdq))] -pub fn _mm256_maskz_unpacklo_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_unpacklo_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let unpacklo = _mm256_unpacklo_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, unpacklo, i64x4::ZERO)) @@ -26373,7 +27493,8 @@ pub fn _mm256_maskz_unpacklo_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m25 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklqdq))] -pub fn _mm_mask_unpacklo_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_unpacklo_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let unpacklo = _mm_unpacklo_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, unpacklo, src.as_i64x2())) @@ -26387,7 +27508,8 @@ pub fn _mm_mask_unpacklo_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpunpcklqdq))] -pub fn _mm_maskz_unpacklo_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_unpacklo_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let unpacklo = _mm_unpacklo_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, unpacklo, i64x2::ZERO)) @@ -26401,7 +27523,8 @@ pub fn _mm_maskz_unpacklo_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpcklps))] -pub fn _mm512_unpacklo_ps(a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_unpacklo_ps(a: __m512, b: __m512) -> __m512 { unsafe { #[rustfmt::skip] simd_shuffle!(a, b, @@ -26420,7 +27543,8 @@ pub fn _mm512_unpacklo_ps(a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpcklps))] -pub fn _mm512_mask_unpacklo_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_unpacklo_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let unpacklo = _mm512_unpacklo_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, unpacklo, src.as_f32x16())) @@ -26434,7 +27558,8 @@ pub fn _mm512_mask_unpacklo_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpcklps))] -pub fn _mm512_maskz_unpacklo_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_unpacklo_ps(k: 
__mmask16, a: __m512, b: __m512) -> __m512 { unsafe { let unpacklo = _mm512_unpacklo_ps(a, b).as_f32x16(); transmute(simd_select_bitmask(k, unpacklo, f32x16::ZERO)) @@ -26448,7 +27573,8 @@ pub fn _mm512_maskz_unpacklo_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpcklps))] -pub fn _mm256_mask_unpacklo_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_unpacklo_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let unpacklo = _mm256_unpacklo_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, unpacklo, src.as_f32x8())) @@ -26462,7 +27588,8 @@ pub fn _mm256_mask_unpacklo_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) - #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpcklps))] -pub fn _mm256_maskz_unpacklo_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_unpacklo_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { let unpacklo = _mm256_unpacklo_ps(a, b).as_f32x8(); transmute(simd_select_bitmask(k, unpacklo, f32x8::ZERO)) @@ -26476,7 +27603,8 @@ pub fn _mm256_maskz_unpacklo_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpcklps))] -pub fn _mm_mask_unpacklo_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_unpacklo_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let unpacklo = _mm_unpacklo_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, unpacklo, src.as_f32x4())) @@ -26490,7 +27618,8 @@ pub fn _mm_mask_unpacklo_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpcklps))] -pub fn _mm_maskz_unpacklo_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_unpacklo_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let unpacklo = _mm_unpacklo_ps(a, b).as_f32x4(); transmute(simd_select_bitmask(k, unpacklo, f32x4::ZERO)) @@ -26504,7 +27633,8 @@ pub fn _mm_maskz_unpacklo_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpcklpd))] -pub fn _mm512_unpacklo_pd(a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_unpacklo_pd(a: __m512d, b: __m512d) -> __m512d { unsafe { simd_shuffle!(a, b, [0, 8, 0 + 2, 8 + 2, 0 + 4, 8 + 4, 0 + 6, 8 + 6]) } } @@ -26515,7 +27645,8 @@ pub fn _mm512_unpacklo_pd(a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpcklpd))] -pub fn _mm512_mask_unpacklo_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue 
= "149298")] +pub const fn _mm512_mask_unpacklo_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let unpacklo = _mm512_unpacklo_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, unpacklo, src.as_f64x8())) @@ -26529,7 +27660,8 @@ pub fn _mm512_mask_unpacklo_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpcklpd))] -pub fn _mm512_maskz_unpacklo_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_unpacklo_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { let unpacklo = _mm512_unpacklo_pd(a, b).as_f64x8(); transmute(simd_select_bitmask(k, unpacklo, f64x8::ZERO)) @@ -26543,7 +27675,8 @@ pub fn _mm512_maskz_unpacklo_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpcklpd))] -pub fn _mm256_mask_unpacklo_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_unpacklo_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let unpacklo = _mm256_unpacklo_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, unpacklo, src.as_f64x4())) @@ -26557,7 +27690,8 @@ pub fn _mm256_mask_unpacklo_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpcklpd))] -pub fn _mm256_maskz_unpacklo_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_unpacklo_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { let unpacklo = _mm256_unpacklo_pd(a, b).as_f64x4(); transmute(simd_select_bitmask(k, unpacklo, f64x4::ZERO)) @@ -26571,7 +27705,8 @@ pub fn _mm256_maskz_unpacklo_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpcklpd))] -pub fn _mm_mask_unpacklo_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_unpacklo_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let unpacklo = _mm_unpacklo_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, unpacklo, src.as_f64x2())) @@ -26585,7 +27720,8 @@ pub fn _mm_mask_unpacklo_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) - #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vunpcklpd))] -pub fn _mm_maskz_unpacklo_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_unpacklo_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let unpacklo = _mm_unpacklo_pd(a, b).as_f64x2(); transmute(simd_select_bitmask(k, unpacklo, f64x2::ZERO)) @@ -26598,7 +27734,8 @@ pub fn _mm_maskz_unpacklo_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = 
"1.89")] -pub fn _mm512_castps128_ps512(a: __m128) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castps128_ps512(a: __m128) -> __m512 { unsafe { simd_shuffle!( a, @@ -26614,7 +27751,8 @@ pub fn _mm512_castps128_ps512(a: __m128) -> __m512 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_castps256_ps512(a: __m256) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castps256_ps512(a: __m256) -> __m512 { unsafe { simd_shuffle!( a, @@ -26630,7 +27768,8 @@ pub fn _mm512_castps256_ps512(a: __m256) -> __m512 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_zextps128_ps512(a: __m128) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_zextps128_ps512(a: __m128) -> __m512 { unsafe { simd_shuffle!( a, @@ -26646,7 +27785,8 @@ pub fn _mm512_zextps128_ps512(a: __m128) -> __m512 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_zextps256_ps512(a: __m256) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_zextps256_ps512(a: __m256) -> __m512 { unsafe { simd_shuffle!( a, @@ -26662,7 +27802,8 @@ pub fn _mm512_zextps256_ps512(a: __m256) -> __m512 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_castps512_ps128(a: __m512) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castps512_ps128(a: __m512) -> __m128 { unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } } @@ -26672,7 +27813,8 @@ pub fn _mm512_castps512_ps128(a: __m512) -> __m128 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_castps512_ps256(a: __m512) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castps512_ps256(a: __m512) -> __m256 { unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } } @@ -26682,7 +27824,8 @@ pub fn _mm512_castps512_ps256(a: __m512) -> __m256 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_castps_pd(a: __m512) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castps_pd(a: __m512) -> __m512d { unsafe { transmute(a) } } @@ -26692,7 +27835,8 @@ pub fn _mm512_castps_pd(a: __m512) -> __m512d { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_castps_si512(a: __m512) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castps_si512(a: __m512) -> __m512i { unsafe { transmute(a) } } @@ -26702,7 +27846,8 @@ pub fn _mm512_castps_si512(a: __m512) -> __m512i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_castpd128_pd512(a: __m128d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castpd128_pd512(a: __m128d) -> __m512d { unsafe { simd_shuffle!(a, _mm_undefined_pd(), [0, 1, 2, 2, 2, 2, 2, 2]) } } @@ -26712,7 +27857,8 @@ pub fn _mm512_castpd128_pd512(a: 
__m128d) -> __m512d { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_castpd256_pd512(a: __m256d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castpd256_pd512(a: __m256d) -> __m512d { unsafe { simd_shuffle!(a, _mm256_undefined_pd(), [0, 1, 2, 3, 4, 4, 4, 4]) } } @@ -26722,7 +27868,8 @@ pub fn _mm512_castpd256_pd512(a: __m256d) -> __m512d { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_zextpd128_pd512(a: __m128d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_zextpd128_pd512(a: __m128d) -> __m512d { unsafe { simd_shuffle!(a, _mm_set1_pd(0.), [0, 1, 2, 2, 2, 2, 2, 2]) } } @@ -26732,7 +27879,8 @@ pub fn _mm512_zextpd128_pd512(a: __m128d) -> __m512d { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_zextpd256_pd512(a: __m256d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_zextpd256_pd512(a: __m256d) -> __m512d { unsafe { simd_shuffle!(a, _mm256_set1_pd(0.), [0, 1, 2, 3, 4, 4, 4, 4]) } } @@ -26742,7 +27890,8 @@ pub fn _mm512_zextpd256_pd512(a: __m256d) -> __m512d { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_castpd512_pd128(a: __m512d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castpd512_pd128(a: __m512d) -> __m128d { unsafe { simd_shuffle!(a, a, [0, 1]) } } @@ -26752,7 +27901,8 @@ pub fn _mm512_castpd512_pd128(a: __m512d) -> __m128d { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_castpd512_pd256(a: __m512d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castpd512_pd256(a: __m512d) -> __m256d { unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } } @@ -26762,7 +27912,8 @@ pub fn _mm512_castpd512_pd256(a: __m512d) -> __m256d { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_castpd_ps(a: __m512d) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castpd_ps(a: __m512d) -> __m512 { unsafe { transmute(a) } } @@ -26772,7 +27923,8 @@ pub fn _mm512_castpd_ps(a: __m512d) -> __m512 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_castpd_si512(a: __m512d) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castpd_si512(a: __m512d) -> __m512i { unsafe { transmute(a) } } @@ -26782,7 +27934,8 @@ pub fn _mm512_castpd_si512(a: __m512d) -> __m512i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_castsi128_si512(a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castsi128_si512(a: __m128i) -> __m512i { unsafe { simd_shuffle!(a, _mm_undefined_si128(), [0, 1, 2, 2, 2, 2, 2, 2]) } } @@ -26792,7 +27945,8 @@ pub fn _mm512_castsi128_si512(a: __m128i) -> __m512i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = 
"1.89")] -pub fn _mm512_castsi256_si512(a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castsi256_si512(a: __m256i) -> __m512i { unsafe { simd_shuffle!(a, _mm256_undefined_si256(), [0, 1, 2, 3, 4, 4, 4, 4]) } } @@ -26802,7 +27956,8 @@ pub fn _mm512_castsi256_si512(a: __m256i) -> __m512i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_zextsi128_si512(a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_zextsi128_si512(a: __m128i) -> __m512i { unsafe { simd_shuffle!(a, _mm_setzero_si128(), [0, 1, 2, 2, 2, 2, 2, 2]) } } @@ -26812,7 +27967,8 @@ pub fn _mm512_zextsi128_si512(a: __m128i) -> __m512i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_zextsi256_si512(a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_zextsi256_si512(a: __m256i) -> __m512i { unsafe { simd_shuffle!(a, _mm256_setzero_si256(), [0, 1, 2, 3, 4, 4, 4, 4]) } } @@ -26822,7 +27978,8 @@ pub fn _mm512_zextsi256_si512(a: __m256i) -> __m512i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_castsi512_si128(a: __m512i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castsi512_si128(a: __m512i) -> __m128i { unsafe { simd_shuffle!(a, a, [0, 1]) } } @@ -26832,7 +27989,8 @@ pub fn _mm512_castsi512_si128(a: __m512i) -> __m128i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_castsi512_si256(a: __m512i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castsi512_si256(a: __m512i) -> __m256i { unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } } @@ -26842,7 +28000,8 @@ pub fn _mm512_castsi512_si256(a: __m512i) -> __m256i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_castsi512_ps(a: __m512i) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castsi512_ps(a: __m512i) -> __m512 { unsafe { transmute(a) } } @@ -26852,7 +28011,8 @@ pub fn _mm512_castsi512_ps(a: __m512i) -> __m512 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_castsi512_pd(a: __m512i) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castsi512_pd(a: __m512i) -> __m512d { unsafe { transmute(a) } } @@ -26863,7 +28023,8 @@ pub fn _mm512_castsi512_pd(a: __m512i) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovd))] -pub fn _mm512_cvtsi512_si32(a: __m512i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtsi512_si32(a: __m512i) -> i32 { unsafe { simd_extract!(a.as_i32x16(), 0) } } @@ -26873,7 +28034,8 @@ pub fn _mm512_cvtsi512_si32(a: __m512i) -> i32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_cvtss_f32(a: __m512) -> f32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = 
"149298")] +pub const fn _mm512_cvtss_f32(a: __m512) -> f32 { unsafe { simd_extract!(a, 0) } } @@ -26883,7 +28045,8 @@ pub fn _mm512_cvtss_f32(a: __m512) -> f32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_cvtsd_f64(a: __m512d) -> f64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtsd_f64(a: __m512d) -> f64 { unsafe { simd_extract!(a, 0) } } @@ -26894,7 +28057,8 @@ pub fn _mm512_cvtsd_f64(a: __m512d) -> f64 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vbroadcast))] //should be vpbroadcastd -pub fn _mm512_broadcastd_epi32(a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_broadcastd_epi32(a: __m128i) -> __m512i { unsafe { let a = _mm512_castsi128_si512(a).as_i32x16(); let ret: i32x16 = simd_shuffle!(a, a, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); @@ -26909,7 +28073,8 @@ pub fn _mm512_broadcastd_epi32(a: __m128i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastd -pub fn _mm512_mask_broadcastd_epi32(src: __m512i, k: __mmask16, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_broadcastd_epi32(src: __m512i, k: __mmask16, a: __m128i) -> __m512i { unsafe { let broadcast = _mm512_broadcastd_epi32(a).as_i32x16(); transmute(simd_select_bitmask(k, broadcast, src.as_i32x16())) @@ -26923,7 +28088,8 @@ pub fn _mm512_mask_broadcastd_epi32(src: __m512i, k: __mmask16, a: __m128i) -> _ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastd -pub fn _mm512_maskz_broadcastd_epi32(k: __mmask16, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_broadcastd_epi32(k: __mmask16, a: __m128i) -> __m512i { unsafe { let broadcast = _mm512_broadcastd_epi32(a).as_i32x16(); transmute(simd_select_bitmask(k, broadcast, i32x16::ZERO)) @@ -26937,7 +28103,8 @@ pub fn _mm512_maskz_broadcastd_epi32(k: __mmask16, a: __m128i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastd -pub fn _mm256_mask_broadcastd_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_broadcastd_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { unsafe { let broadcast = _mm256_broadcastd_epi32(a).as_i32x8(); transmute(simd_select_bitmask(k, broadcast, src.as_i32x8())) @@ -26951,7 +28118,8 @@ pub fn _mm256_mask_broadcastd_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastd -pub fn _mm256_maskz_broadcastd_epi32(k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_broadcastd_epi32(k: __mmask8, a: __m128i) -> __m256i { unsafe { let broadcast = _mm256_broadcastd_epi32(a).as_i32x8(); 
transmute(simd_select_bitmask(k, broadcast, i32x8::ZERO)) @@ -26965,7 +28133,8 @@ pub fn _mm256_maskz_broadcastd_epi32(k: __mmask8, a: __m128i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastd -pub fn _mm_mask_broadcastd_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_broadcastd_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let broadcast = _mm_broadcastd_epi32(a).as_i32x4(); transmute(simd_select_bitmask(k, broadcast, src.as_i32x4())) @@ -26979,7 +28148,8 @@ pub fn _mm_mask_broadcastd_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m12 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastd -pub fn _mm_maskz_broadcastd_epi32(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_broadcastd_epi32(k: __mmask8, a: __m128i) -> __m128i { unsafe { let broadcast = _mm_broadcastd_epi32(a).as_i32x4(); transmute(simd_select_bitmask(k, broadcast, i32x4::ZERO)) @@ -26993,7 +28163,8 @@ pub fn _mm_maskz_broadcastd_epi32(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vbroadcast))] //should be vpbroadcastq -pub fn _mm512_broadcastq_epi64(a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_broadcastq_epi64(a: __m128i) -> __m512i { unsafe { simd_shuffle!(a, a, [0, 0, 0, 0, 0, 0, 0, 0]) } } @@ -27004,7 +28175,8 @@ pub fn _mm512_broadcastq_epi64(a: __m128i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastq -pub fn _mm512_mask_broadcastq_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_broadcastq_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __m512i { unsafe { let broadcast = _mm512_broadcastq_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, broadcast, src.as_i64x8())) @@ -27018,7 +28190,8 @@ pub fn _mm512_mask_broadcastq_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastq -pub fn _mm512_maskz_broadcastq_epi64(k: __mmask8, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_broadcastq_epi64(k: __mmask8, a: __m128i) -> __m512i { unsafe { let broadcast = _mm512_broadcastq_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, broadcast, i64x8::ZERO)) @@ -27032,7 +28205,8 @@ pub fn _mm512_maskz_broadcastq_epi64(k: __mmask8, a: __m128i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastq -pub fn _mm256_mask_broadcastq_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn 
_mm256_mask_broadcastq_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { unsafe { let broadcast = _mm256_broadcastq_epi64(a).as_i64x4(); transmute(simd_select_bitmask(k, broadcast, src.as_i64x4())) @@ -27046,7 +28220,8 @@ pub fn _mm256_mask_broadcastq_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastq -pub fn _mm256_maskz_broadcastq_epi64(k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_broadcastq_epi64(k: __mmask8, a: __m128i) -> __m256i { unsafe { let broadcast = _mm256_broadcastq_epi64(a).as_i64x4(); transmute(simd_select_bitmask(k, broadcast, i64x4::ZERO)) @@ -27060,7 +28235,8 @@ pub fn _mm256_maskz_broadcastq_epi64(k: __mmask8, a: __m128i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastq -pub fn _mm_mask_broadcastq_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_broadcastq_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { let broadcast = _mm_broadcastq_epi64(a).as_i64x2(); transmute(simd_select_bitmask(k, broadcast, src.as_i64x2())) @@ -27074,7 +28250,8 @@ pub fn _mm_mask_broadcastq_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m12 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastq -pub fn _mm_maskz_broadcastq_epi64(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_broadcastq_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { let broadcast = _mm_broadcastq_epi64(a).as_i64x2(); transmute(simd_select_bitmask(k, broadcast, i64x2::ZERO)) @@ -27088,7 +28265,8 @@ pub fn _mm_maskz_broadcastq_epi64(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vbroadcastss))] -pub fn _mm512_broadcastss_ps(a: __m128) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_broadcastss_ps(a: __m128) -> __m512 { unsafe { simd_shuffle!(a, a, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) } } @@ -27099,7 +28277,8 @@ pub fn _mm512_broadcastss_ps(a: __m128) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vbroadcastss))] -pub fn _mm512_mask_broadcastss_ps(src: __m512, k: __mmask16, a: __m128) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_broadcastss_ps(src: __m512, k: __mmask16, a: __m128) -> __m512 { unsafe { let broadcast = _mm512_broadcastss_ps(a).as_f32x16(); transmute(simd_select_bitmask(k, broadcast, src.as_f32x16())) @@ -27113,7 +28292,8 @@ pub fn _mm512_mask_broadcastss_ps(src: __m512, k: __mmask16, a: __m128) -> __m51 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vbroadcastss))] -pub fn _mm512_maskz_broadcastss_ps(k: __mmask16, a: __m128) -> __m512 { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_broadcastss_ps(k: __mmask16, a: __m128) -> __m512 { unsafe { let broadcast = _mm512_broadcastss_ps(a).as_f32x16(); transmute(simd_select_bitmask(k, broadcast, f32x16::ZERO)) @@ -27127,7 +28307,8 @@ pub fn _mm512_maskz_broadcastss_ps(k: __mmask16, a: __m128) -> __m512 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vbroadcastss))] -pub fn _mm256_mask_broadcastss_ps(src: __m256, k: __mmask8, a: __m128) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_broadcastss_ps(src: __m256, k: __mmask8, a: __m128) -> __m256 { unsafe { let broadcast = _mm256_broadcastss_ps(a).as_f32x8(); transmute(simd_select_bitmask(k, broadcast, src.as_f32x8())) @@ -27141,7 +28322,8 @@ pub fn _mm256_mask_broadcastss_ps(src: __m256, k: __mmask8, a: __m128) -> __m256 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vbroadcastss))] -pub fn _mm256_maskz_broadcastss_ps(k: __mmask8, a: __m128) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_broadcastss_ps(k: __mmask8, a: __m128) -> __m256 { unsafe { let broadcast = _mm256_broadcastss_ps(a).as_f32x8(); transmute(simd_select_bitmask(k, broadcast, f32x8::ZERO)) @@ -27155,7 +28337,8 @@ pub fn _mm256_maskz_broadcastss_ps(k: __mmask8, a: __m128) -> __m256 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vbroadcastss))] -pub fn _mm_mask_broadcastss_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_broadcastss_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { unsafe { let broadcast = _mm_broadcastss_ps(a).as_f32x4(); transmute(simd_select_bitmask(k, broadcast, src.as_f32x4())) @@ -27169,7 +28352,8 @@ pub fn _mm_mask_broadcastss_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vbroadcastss))] -pub fn _mm_maskz_broadcastss_ps(k: __mmask8, a: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_broadcastss_ps(k: __mmask8, a: __m128) -> __m128 { unsafe { let broadcast = _mm_broadcastss_ps(a).as_f32x4(); transmute(simd_select_bitmask(k, broadcast, f32x4::ZERO)) @@ -27183,7 +28367,8 @@ pub fn _mm_maskz_broadcastss_ps(k: __mmask8, a: __m128) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vbroadcastsd))] -pub fn _mm512_broadcastsd_pd(a: __m128d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_broadcastsd_pd(a: __m128d) -> __m512d { unsafe { simd_shuffle!(a, a, [0, 0, 0, 0, 0, 0, 0, 0]) } } @@ -27194,7 +28379,8 @@ pub fn _mm512_broadcastsd_pd(a: __m128d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vbroadcastsd))] -pub fn _mm512_mask_broadcastsd_pd(src: __m512d, k: __mmask8, a: __m128d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] 
+pub const fn _mm512_mask_broadcastsd_pd(src: __m512d, k: __mmask8, a: __m128d) -> __m512d { unsafe { let broadcast = _mm512_broadcastsd_pd(a).as_f64x8(); transmute(simd_select_bitmask(k, broadcast, src.as_f64x8())) @@ -27208,7 +28394,8 @@ pub fn _mm512_mask_broadcastsd_pd(src: __m512d, k: __mmask8, a: __m128d) -> __m5 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vbroadcastsd))] -pub fn _mm512_maskz_broadcastsd_pd(k: __mmask8, a: __m128d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_broadcastsd_pd(k: __mmask8, a: __m128d) -> __m512d { unsafe { let broadcast = _mm512_broadcastsd_pd(a).as_f64x8(); transmute(simd_select_bitmask(k, broadcast, f64x8::ZERO)) @@ -27222,7 +28409,8 @@ pub fn _mm512_maskz_broadcastsd_pd(k: __mmask8, a: __m128d) -> __m512d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vbroadcastsd))] -pub fn _mm256_mask_broadcastsd_pd(src: __m256d, k: __mmask8, a: __m128d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_broadcastsd_pd(src: __m256d, k: __mmask8, a: __m128d) -> __m256d { unsafe { let broadcast = _mm256_broadcastsd_pd(a).as_f64x4(); transmute(simd_select_bitmask(k, broadcast, src.as_f64x4())) @@ -27236,7 +28424,8 @@ pub fn _mm256_mask_broadcastsd_pd(src: __m256d, k: __mmask8, a: __m128d) -> __m2 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vbroadcastsd))] -pub fn _mm256_maskz_broadcastsd_pd(k: __mmask8, a: __m128d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_broadcastsd_pd(k: __mmask8, a: __m128d) -> __m256d { unsafe { let broadcast = _mm256_broadcastsd_pd(a).as_f64x4(); transmute(simd_select_bitmask(k, broadcast, f64x4::ZERO)) @@ -27249,7 +28438,8 @@ pub fn _mm256_maskz_broadcastsd_pd(k: __mmask8, a: __m128d) -> __m256d { #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcasti32x4, linux: vshuf #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_broadcast_i32x4(a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_broadcast_i32x4(a: __m128i) -> __m512i { unsafe { let a = a.as_i32x4(); let ret: i32x16 = simd_shuffle!(a, a, [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]); @@ -27263,7 +28453,8 @@ pub fn _mm512_broadcast_i32x4(a: __m128i) -> __m512i { #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcasti32x4, linux: vshuf #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_broadcast_i32x4(src: __m512i, k: __mmask16, a: __m128i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_broadcast_i32x4(src: __m512i, k: __mmask16, a: __m128i) -> __m512i { unsafe { let broadcast = _mm512_broadcast_i32x4(a).as_i32x16(); transmute(simd_select_bitmask(k, broadcast, src.as_i32x16())) @@ -27276,7 +28467,8 @@ pub fn _mm512_mask_broadcast_i32x4(src: __m512i, k: __mmask16, a: __m128i) -> __ #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcasti32x4, linux: vshuf #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_broadcast_i32x4(k: __mmask16, a: __m128i) -> __m512i { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_broadcast_i32x4(k: __mmask16, a: __m128i) -> __m512i { unsafe { let broadcast = _mm512_broadcast_i32x4(a).as_i32x16(); transmute(simd_select_bitmask(k, broadcast, i32x16::ZERO)) @@ -27289,7 +28481,8 @@ pub fn _mm512_maskz_broadcast_i32x4(k: __mmask16, a: __m128i) -> __m512i { #[inline] #[target_feature(enable = "avx512f,avx512vl")] //msvc: vbroadcasti32x4, linux: vshuf #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_broadcast_i32x4(a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_broadcast_i32x4(a: __m128i) -> __m256i { unsafe { let a = a.as_i32x4(); let ret: i32x8 = simd_shuffle!(a, a, [0, 1, 2, 3, 0, 1, 2, 3]); @@ -27303,7 +28496,8 @@ pub fn _mm256_broadcast_i32x4(a: __m128i) -> __m256i { #[inline] #[target_feature(enable = "avx512f,avx512vl")] //msvc: vbroadcasti32x4, linux: vshuf #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_broadcast_i32x4(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_broadcast_i32x4(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { unsafe { let broadcast = _mm256_broadcast_i32x4(a).as_i32x8(); transmute(simd_select_bitmask(k, broadcast, src.as_i32x8())) @@ -27316,7 +28510,8 @@ pub fn _mm256_mask_broadcast_i32x4(src: __m256i, k: __mmask8, a: __m128i) -> __m #[inline] #[target_feature(enable = "avx512f,avx512vl")] //msvc: vbroadcasti32x4, linux: vshuf #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_broadcast_i32x4(k: __mmask8, a: __m128i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_broadcast_i32x4(k: __mmask8, a: __m128i) -> __m256i { unsafe { let broadcast = _mm256_broadcast_i32x4(a).as_i32x8(); transmute(simd_select_bitmask(k, broadcast, i32x8::ZERO)) @@ -27329,7 +28524,8 @@ pub fn _mm256_maskz_broadcast_i32x4(k: __mmask8, a: __m128i) -> __m256i { #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcasti64x4, linux: vperm #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_broadcast_i64x4(a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_broadcast_i64x4(a: __m256i) -> __m512i { unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 0, 1, 2, 3]) } } @@ -27339,7 +28535,8 @@ pub fn _mm512_broadcast_i64x4(a: __m256i) -> __m512i { #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcasti64x4, linux: vperm #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_broadcast_i64x4(src: __m512i, k: __mmask8, a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_broadcast_i64x4(src: __m512i, k: __mmask8, a: __m256i) -> __m512i { unsafe { let broadcast = _mm512_broadcast_i64x4(a).as_i64x8(); transmute(simd_select_bitmask(k, broadcast, src.as_i64x8())) @@ -27352,7 +28549,8 @@ pub fn _mm512_mask_broadcast_i64x4(src: __m512i, k: __mmask8, a: __m256i) -> __m #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcasti64x4, linux: vperm #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_broadcast_i64x4(k: __mmask8, a: __m256i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn 
_mm512_maskz_broadcast_i64x4(k: __mmask8, a: __m256i) -> __m512i { unsafe { let broadcast = _mm512_broadcast_i64x4(a).as_i64x8(); transmute(simd_select_bitmask(k, broadcast, i64x8::ZERO)) @@ -27365,7 +28563,8 @@ pub fn _mm512_maskz_broadcast_i64x4(k: __mmask8, a: __m256i) -> __m512i { #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcastf32x4, linux: vshuf #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_broadcast_f32x4(a: __m128) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_broadcast_f32x4(a: __m128) -> __m512 { unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]) } } @@ -27375,7 +28574,8 @@ pub fn _mm512_broadcast_f32x4(a: __m128) -> __m512 { #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcastf32x4, linux: vshu #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_broadcast_f32x4(src: __m512, k: __mmask16, a: __m128) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_broadcast_f32x4(src: __m512, k: __mmask16, a: __m128) -> __m512 { unsafe { let broadcast = _mm512_broadcast_f32x4(a).as_f32x16(); transmute(simd_select_bitmask(k, broadcast, src.as_f32x16())) @@ -27388,7 +28588,8 @@ pub fn _mm512_mask_broadcast_f32x4(src: __m512, k: __mmask16, a: __m128) -> __m5 #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcastf32x4, linux: vshu #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_broadcast_f32x4(k: __mmask16, a: __m128) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_broadcast_f32x4(k: __mmask16, a: __m128) -> __m512 { unsafe { let broadcast = _mm512_broadcast_f32x4(a).as_f32x16(); transmute(simd_select_bitmask(k, broadcast, f32x16::ZERO)) @@ -27401,7 +28602,8 @@ pub fn _mm512_maskz_broadcast_f32x4(k: __mmask16, a: __m128) -> __m512 { #[inline] #[target_feature(enable = "avx512f,avx512vl")] //msvc: vbroadcastf32x4, linux: vshuf #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_broadcast_f32x4(a: __m128) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_broadcast_f32x4(a: __m128) -> __m256 { unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 0, 1, 2, 3]) } } @@ -27411,7 +28613,8 @@ pub fn _mm256_broadcast_f32x4(a: __m128) -> __m256 { #[inline] #[target_feature(enable = "avx512f,avx512vl")] //msvc: vbroadcastf32x4, linux: vshu #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_mask_broadcast_f32x4(src: __m256, k: __mmask8, a: __m128) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_broadcast_f32x4(src: __m256, k: __mmask8, a: __m128) -> __m256 { unsafe { let broadcast = _mm256_broadcast_f32x4(a).as_f32x8(); transmute(simd_select_bitmask(k, broadcast, src.as_f32x8())) @@ -27424,7 +28627,8 @@ pub fn _mm256_mask_broadcast_f32x4(src: __m256, k: __mmask8, a: __m128) -> __m25 #[inline] #[target_feature(enable = "avx512f,avx512vl")] //msvc: vbroadcastf32x4, linux: vshu #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm256_maskz_broadcast_f32x4(k: __mmask8, a: __m128) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_broadcast_f32x4(k: __mmask8, a: __m128) -> __m256 { unsafe { let broadcast = _mm256_broadcast_f32x4(a).as_f32x8(); 
transmute(simd_select_bitmask(k, broadcast, f32x8::ZERO)) @@ -27437,7 +28641,8 @@ pub fn _mm256_maskz_broadcast_f32x4(k: __mmask8, a: __m128) -> __m256 { #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcastf64x4, linux: vperm #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_broadcast_f64x4(a: __m256d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_broadcast_f64x4(a: __m256d) -> __m512d { unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 0, 1, 2, 3]) } } @@ -27447,7 +28652,8 @@ pub fn _mm512_broadcast_f64x4(a: __m256d) -> __m512d { #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcastf64x4, linux: vper #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_broadcast_f64x4(src: __m512d, k: __mmask8, a: __m256d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_broadcast_f64x4(src: __m512d, k: __mmask8, a: __m256d) -> __m512d { unsafe { let broadcast = _mm512_broadcast_f64x4(a).as_f64x8(); transmute(simd_select_bitmask(k, broadcast, src.as_f64x8())) @@ -27460,7 +28666,8 @@ pub fn _mm512_mask_broadcast_f64x4(src: __m512d, k: __mmask8, a: __m256d) -> __m #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcastf64x4, linux: vper #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_maskz_broadcast_f64x4(k: __mmask8, a: __m256d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_broadcast_f64x4(k: __mmask8, a: __m256d) -> __m512d { unsafe { let broadcast = _mm512_broadcast_f64x4(a).as_f64x8(); transmute(simd_select_bitmask(k, broadcast, f64x8::ZERO)) @@ -27474,7 +28681,8 @@ pub fn _mm512_maskz_broadcast_f64x4(k: __mmask8, a: __m256d) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqa32))] //should be vpblendmd -pub fn _mm512_mask_blend_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_blend_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_select_bitmask(k, b.as_i32x16(), a.as_i32x16())) } } @@ -27485,7 +28693,8 @@ pub fn _mm512_mask_blend_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqa32))] //should be vpblendmd -pub fn _mm256_mask_blend_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_blend_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_select_bitmask(k, b.as_i32x8(), a.as_i32x8())) } } @@ -27496,7 +28705,8 @@ pub fn _mm256_mask_blend_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqa32))] //should be vpblendmd -pub fn _mm_mask_blend_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_blend_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_select_bitmask(k, b.as_i32x4(), a.as_i32x4())) } } @@ -27507,7 +28717,8 @@ pub fn 
_mm_mask_blend_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqa64))] //should be vpblendmq -pub fn _mm512_mask_blend_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_blend_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_select_bitmask(k, b.as_i64x8(), a.as_i64x8())) } } @@ -27518,7 +28729,8 @@ pub fn _mm512_mask_blend_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqa64))] //should be vpblendmq -pub fn _mm256_mask_blend_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_blend_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_select_bitmask(k, b.as_i64x4(), a.as_i64x4())) } } @@ -27529,7 +28741,8 @@ pub fn _mm256_mask_blend_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovdqa64))] //should be vpblendmq -pub fn _mm_mask_blend_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_blend_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_select_bitmask(k, b.as_i64x2(), a.as_i64x2())) } } @@ -27540,7 +28753,8 @@ pub fn _mm_mask_blend_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vpblendmps -pub fn _mm512_mask_blend_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_blend_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { unsafe { transmute(simd_select_bitmask(k, b.as_f32x16(), a.as_f32x16())) } } @@ -27551,7 +28765,8 @@ pub fn _mm512_mask_blend_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vpblendmps -pub fn _mm256_mask_blend_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_blend_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { unsafe { transmute(simd_select_bitmask(k, b.as_f32x8(), a.as_f32x8())) } } @@ -27562,7 +28777,8 @@ pub fn _mm256_mask_blend_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vpblendmps -pub fn _mm_mask_blend_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_blend_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { transmute(simd_select_bitmask(k, b.as_f32x4(), a.as_f32x4())) } } @@ -27573,7 +28789,8 @@ pub fn _mm_mask_blend_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { 
#[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovapd))] //should be vpblendmpd -pub fn _mm512_mask_blend_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_blend_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { unsafe { transmute(simd_select_bitmask(k, b.as_f64x8(), a.as_f64x8())) } } @@ -27584,7 +28801,8 @@ pub fn _mm512_mask_blend_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovapd))] //should be vpblendmpd -pub fn _mm256_mask_blend_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_blend_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { unsafe { transmute(simd_select_bitmask(k, b.as_f64x4(), a.as_f64x4())) } } @@ -27595,7 +28813,8 @@ pub fn _mm256_mask_blend_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovapd))] //should be vpblendmpd -pub fn _mm_mask_blend_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_blend_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { transmute(simd_select_bitmask(k, b.as_f64x2(), a.as_f64x2())) } } @@ -27609,7 +28828,8 @@ pub fn _mm_mask_blend_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_alignr_epi32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_alignr_epi32(a: __m512i, b: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x16(); @@ -27690,7 +28910,8 @@ pub fn _mm512_alignr_epi32(a: __m512i, b: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] #[rustc_legacy_const_generics(4)] -pub fn _mm512_mask_alignr_epi32( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_alignr_epi32( src: __m512i, k: __mmask16, a: __m512i, @@ -27711,7 +28932,12 @@ pub fn _mm512_mask_alignr_epi32( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_alignr_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_alignr_epi32( + k: __mmask16, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = _mm512_alignr_epi32::(a, b); @@ -27729,7 +28955,8 @@ pub fn _mm512_maskz_alignr_epi32(k: __mmask16, a: __m512i, b: _ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_alignr_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_alignr_epi32(a: __m256i, b: __m256i) -> __m256i 
{ unsafe { static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x8(); @@ -27758,7 +28985,8 @@ pub fn _mm256_alignr_epi32(a: __m256i, b: __m256i) -> __m256i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] #[rustc_legacy_const_generics(4)] -pub fn _mm256_mask_alignr_epi32<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_alignr_epi32<const IMM8: i32>( src: __m256i, k: __mmask8, a: __m256i, @@ -27779,7 +29007,12 @@ pub fn _mm256_mask_alignr_epi32( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_maskz_alignr_epi32<const IMM8: i32>(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_alignr_epi32<const IMM8: i32>( + k: __mmask8, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = _mm256_alignr_epi32::<IMM8>(a, b); @@ -27797,7 +29030,8 @@ pub fn _mm256_maskz_alignr_epi32(k: __mmask8, a: __m256i, b: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))] //should be valignd #[rustc_legacy_const_generics(2)] -pub fn _mm_alignr_epi32<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_alignr_epi32<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let a = a.as_i32x4(); @@ -27822,7 +29056,8 @@ pub fn _mm_alignr_epi32(a: __m128i, b: __m128i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] #[rustc_legacy_const_generics(4)] -pub fn _mm_mask_alignr_epi32<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_alignr_epi32<const IMM8: i32>( src: __m128i, k: __mmask8, a: __m128i, @@ -27843,7 +29078,12 @@ pub fn _mm_mask_alignr_epi32( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm_maskz_alignr_epi32<const IMM8: i32>(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_alignr_epi32<const IMM8: i32>( + k: __mmask8, + a: __m128i, + b: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = _mm_alignr_epi32::<IMM8>(a, b); @@ -27861,7 +29101,8 @@ pub fn _mm_maskz_alignr_epi32(k: __mmask8, a: __m128i, b: __m12 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_alignr_epi64<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_alignr_epi64<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let imm8: i32 = IMM8 % 8; @@ -27888,7 +29129,8 @@ pub fn _mm512_alignr_epi64(a: __m512i, b: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))] #[rustc_legacy_const_generics(4)] -pub fn _mm512_mask_alignr_epi64<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_alignr_epi64<const IMM8: i32>( src: __m512i, k: __mmask8, a: __m512i, @@ -27909,7 +29151,12 @@ pub fn _mm512_mask_alignr_epi64( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test,
assert_instr(valignq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_alignr_epi64<const IMM8: i32>(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_alignr_epi64<const IMM8: i32>( + k: __mmask8, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = _mm512_alignr_epi64::<IMM8>(a, b); @@ -27927,7 +29174,8 @@ pub fn _mm512_maskz_alignr_epi64(k: __mmask8, a: __m512i, b: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_alignr_epi64<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_alignr_epi64<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let imm8: i32 = IMM8 % 4; @@ -27950,7 +29198,8 @@ pub fn _mm256_alignr_epi64(a: __m256i, b: __m256i) -> __m256i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))] #[rustc_legacy_const_generics(4)] -pub fn _mm256_mask_alignr_epi64<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_alignr_epi64<const IMM8: i32>( src: __m256i, k: __mmask8, a: __m256i, @@ -27971,7 +29220,12 @@ pub fn _mm256_mask_alignr_epi64( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_maskz_alignr_epi64<const IMM8: i32>(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_alignr_epi64<const IMM8: i32>( + k: __mmask8, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = _mm256_alignr_epi64::<IMM8>(a, b); @@ -27989,7 +29243,8 @@ pub fn _mm256_maskz_alignr_epi64(k: __mmask8, a: __m256i, b: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))] //should be valignq #[rustc_legacy_const_generics(2)] -pub fn _mm_alignr_epi64<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_alignr_epi64<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let imm8: i32 = IMM8 % 2; @@ -28010,7 +29265,8 @@ pub fn _mm_alignr_epi64(a: __m128i, b: __m128i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))] #[rustc_legacy_const_generics(4)] -pub fn _mm_mask_alignr_epi64<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_alignr_epi64<const IMM8: i32>( src: __m128i, k: __mmask8, a: __m128i, @@ -28031,7 +29287,12 @@ pub fn _mm_mask_alignr_epi64( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] -pub fn _mm_maskz_alignr_epi64<const IMM8: i32>(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_alignr_epi64<const IMM8: i32>( + k: __mmask8, + a: __m128i, + b: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let r = _mm_alignr_epi64::<IMM8>(a, b); @@ -28046,7 +29307,8 @@ pub fn _mm_maskz_alignr_epi64(k: __mmask8, a: __m128i, b: __m12 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test,
assert_instr(vpandq))] //should be vpandd, but generate vpandq -pub fn _mm512_and_epi32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_and_epi32(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_and(a.as_i32x16(), b.as_i32x16())) } } @@ -28057,7 +29319,8 @@ pub fn _mm512_and_epi32(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandd))] -pub fn _mm512_mask_and_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_and_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let and = _mm512_and_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, and, src.as_i32x16())) @@ -28071,7 +29334,8 @@ pub fn _mm512_mask_and_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandd))] -pub fn _mm512_maskz_and_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_and_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let and = _mm512_and_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, and, i32x16::ZERO)) @@ -28085,7 +29349,8 @@ pub fn _mm512_maskz_and_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandd))] -pub fn _mm256_mask_and_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_and_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let and = simd_and(a.as_i32x8(), b.as_i32x8()); transmute(simd_select_bitmask(k, and, src.as_i32x8())) @@ -28099,7 +29364,8 @@ pub fn _mm256_mask_and_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandd))] -pub fn _mm256_maskz_and_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_and_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let and = simd_and(a.as_i32x8(), b.as_i32x8()); transmute(simd_select_bitmask(k, and, i32x8::ZERO)) @@ -28113,7 +29379,8 @@ pub fn _mm256_maskz_and_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandd))] -pub fn _mm_mask_and_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_and_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let and = simd_and(a.as_i32x4(), b.as_i32x4()); transmute(simd_select_bitmask(k, and, src.as_i32x4())) @@ -28127,7 +29394,8 @@ pub fn _mm_mask_and_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = 
"stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandd))] -pub fn _mm_maskz_and_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_and_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let and = simd_and(a.as_i32x4(), b.as_i32x4()); transmute(simd_select_bitmask(k, and, i32x4::ZERO)) @@ -28141,7 +29409,8 @@ pub fn _mm_maskz_and_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandq))] -pub fn _mm512_and_epi64(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_and_epi64(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_and(a.as_i64x8(), b.as_i64x8())) } } @@ -28152,7 +29421,8 @@ pub fn _mm512_and_epi64(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandq))] -pub fn _mm512_mask_and_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_and_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let and = _mm512_and_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, and, src.as_i64x8())) @@ -28166,7 +29436,8 @@ pub fn _mm512_mask_and_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandq))] -pub fn _mm512_maskz_and_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_and_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let and = _mm512_and_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, and, i64x8::ZERO)) @@ -28180,7 +29451,8 @@ pub fn _mm512_maskz_and_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandq))] -pub fn _mm256_mask_and_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_and_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let and = simd_and(a.as_i64x4(), b.as_i64x4()); transmute(simd_select_bitmask(k, and, src.as_i64x4())) @@ -28194,7 +29466,8 @@ pub fn _mm256_mask_and_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandq))] -pub fn _mm256_maskz_and_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_and_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let and = simd_and(a.as_i64x4(), b.as_i64x4()); transmute(simd_select_bitmask(k, and, i64x4::ZERO)) @@ -28208,7 +29481,8 @@ pub fn _mm256_maskz_and_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] 
#[cfg_attr(test, assert_instr(vpandq))] -pub fn _mm_mask_and_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_and_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let and = simd_and(a.as_i64x2(), b.as_i64x2()); transmute(simd_select_bitmask(k, and, src.as_i64x2())) @@ -28222,7 +29496,8 @@ pub fn _mm_mask_and_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandq))] -pub fn _mm_maskz_and_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_and_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let and = simd_and(a.as_i64x2(), b.as_i64x2()); transmute(simd_select_bitmask(k, and, i64x2::ZERO)) @@ -28236,7 +29511,8 @@ pub fn _mm_maskz_and_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandq))] -pub fn _mm512_and_si512(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_and_si512(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_and(a.as_i32x16(), b.as_i32x16())) } } @@ -28247,7 +29523,8 @@ pub fn _mm512_and_si512(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vporq))] -pub fn _mm512_or_epi32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_or_epi32(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_or(a.as_i32x16(), b.as_i32x16())) } } @@ -28258,7 +29535,8 @@ pub fn _mm512_or_epi32(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpord))] -pub fn _mm512_mask_or_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_or_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let or = _mm512_or_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, or, src.as_i32x16())) @@ -28272,7 +29550,8 @@ pub fn _mm512_mask_or_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpord))] -pub fn _mm512_maskz_or_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_or_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let or = _mm512_or_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, or, i32x16::ZERO)) @@ -28286,7 +29565,8 @@ pub fn _mm512_maskz_or_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vor))] //should be vpord -pub fn _mm256_or_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = 
"stdarch_const_x86", issue = "149298")] +pub const fn _mm256_or_epi32(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_or(a.as_i32x8(), b.as_i32x8())) } } @@ -28297,7 +29577,8 @@ pub fn _mm256_or_epi32(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpord))] -pub fn _mm256_mask_or_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_or_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let or = _mm256_or_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, or, src.as_i32x8())) @@ -28311,7 +29592,8 @@ pub fn _mm256_mask_or_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) - #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpord))] -pub fn _mm256_maskz_or_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_or_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let or = _mm256_or_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, or, i32x8::ZERO)) @@ -28325,7 +29607,8 @@ pub fn _mm256_maskz_or_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vor))] //should be vpord -pub fn _mm_or_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_or_epi32(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_or(a.as_i32x4(), b.as_i32x4())) } } @@ -28336,7 +29619,8 @@ pub fn _mm_or_epi32(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpord))] -pub fn _mm_mask_or_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_or_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let or = _mm_or_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, or, src.as_i32x4())) @@ -28350,7 +29634,8 @@ pub fn _mm_mask_or_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpord))] -pub fn _mm_maskz_or_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_or_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let or = _mm_or_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, or, i32x4::ZERO)) @@ -28364,7 +29649,8 @@ pub fn _mm_maskz_or_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vporq))] -pub fn _mm512_or_epi64(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_or_epi64(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_or(a.as_i64x8(), b.as_i64x8())) } } @@ 
-28375,7 +29661,8 @@ pub fn _mm512_or_epi64(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vporq))] -pub fn _mm512_mask_or_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_or_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let or = _mm512_or_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, or, src.as_i64x8())) @@ -28389,7 +29676,8 @@ pub fn _mm512_mask_or_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) - #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vporq))] -pub fn _mm512_maskz_or_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_or_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let or = _mm512_or_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, or, i64x8::ZERO)) @@ -28403,7 +29691,8 @@ pub fn _mm512_maskz_or_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vor))] //should be vporq -pub fn _mm256_or_epi64(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_or_epi64(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_or(a.as_i64x4(), b.as_i64x4())) } } @@ -28414,7 +29703,8 @@ pub fn _mm256_or_epi64(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vporq))] -pub fn _mm256_mask_or_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_or_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let or = _mm256_or_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, or, src.as_i64x4())) @@ -28428,7 +29718,8 @@ pub fn _mm256_mask_or_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) - #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vporq))] -pub fn _mm256_maskz_or_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_or_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let or = _mm256_or_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, or, i64x4::ZERO)) @@ -28442,7 +29733,8 @@ pub fn _mm256_maskz_or_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vor))] //should be vporq -pub fn _mm_or_epi64(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_or_epi64(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_or(a.as_i64x2(), b.as_i64x2())) } } @@ -28453,7 +29745,8 @@ pub fn _mm_or_epi64(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = 
"stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vporq))] -pub fn _mm_mask_or_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_or_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let or = _mm_or_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, or, src.as_i64x2())) @@ -28467,7 +29760,8 @@ pub fn _mm_mask_or_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vporq))] -pub fn _mm_maskz_or_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_or_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let or = _mm_or_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, or, i64x2::ZERO)) @@ -28481,7 +29775,8 @@ pub fn _mm_maskz_or_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vporq))] -pub fn _mm512_or_si512(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_or_si512(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_or(a.as_i32x16(), b.as_i32x16())) } } @@ -28492,7 +29787,8 @@ pub fn _mm512_or_si512(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpxorq))] //should be vpxord -pub fn _mm512_xor_epi32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_xor_epi32(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_xor(a.as_i32x16(), b.as_i32x16())) } } @@ -28503,7 +29799,8 @@ pub fn _mm512_xor_epi32(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpxord))] -pub fn _mm512_mask_xor_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_xor_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let xor = _mm512_xor_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, xor, src.as_i32x16())) @@ -28517,7 +29814,8 @@ pub fn _mm512_mask_xor_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpxord))] -pub fn _mm512_maskz_xor_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_xor_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let xor = _mm512_xor_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, xor, i32x16::ZERO)) @@ -28531,7 +29829,8 @@ pub fn _mm512_maskz_xor_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vxor))] //should be vpxord -pub fn _mm256_xor_epi32(a: __m256i, b: __m256i) -> __m256i { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_xor_epi32(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_xor(a.as_i32x8(), b.as_i32x8())) } } @@ -28542,7 +29841,8 @@ pub fn _mm256_xor_epi32(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpxord))] -pub fn _mm256_mask_xor_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_xor_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let xor = _mm256_xor_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, xor, src.as_i32x8())) @@ -28556,7 +29856,8 @@ pub fn _mm256_mask_xor_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpxord))] -pub fn _mm256_maskz_xor_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_xor_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let xor = _mm256_xor_epi32(a, b).as_i32x8(); transmute(simd_select_bitmask(k, xor, i32x8::ZERO)) @@ -28570,7 +29871,8 @@ pub fn _mm256_maskz_xor_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vxor))] //should be vpxord -pub fn _mm_xor_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_xor_epi32(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_xor(a.as_i32x4(), b.as_i32x4())) } } @@ -28581,7 +29883,8 @@ pub fn _mm_xor_epi32(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpxord))] -pub fn _mm_mask_xor_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_xor_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let xor = _mm_xor_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, xor, src.as_i32x4())) @@ -28595,7 +29898,8 @@ pub fn _mm_mask_xor_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpxord))] -pub fn _mm_maskz_xor_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_xor_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let xor = _mm_xor_epi32(a, b).as_i32x4(); transmute(simd_select_bitmask(k, xor, i32x4::ZERO)) @@ -28609,7 +29913,8 @@ pub fn _mm_maskz_xor_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpxorq))] -pub fn _mm512_xor_epi64(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_xor_epi64(a: __m512i, b: __m512i) -> __m512i 
{ unsafe { transmute(simd_xor(a.as_i64x8(), b.as_i64x8())) } } @@ -28620,7 +29925,8 @@ pub fn _mm512_xor_epi64(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpxorq))] -pub fn _mm512_mask_xor_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_xor_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let xor = _mm512_xor_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, xor, src.as_i64x8())) @@ -28634,7 +29940,8 @@ pub fn _mm512_mask_xor_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpxorq))] -pub fn _mm512_maskz_xor_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_xor_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let xor = _mm512_xor_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, xor, i64x8::ZERO)) @@ -28648,7 +29955,8 @@ pub fn _mm512_maskz_xor_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vxor))] //should be vpxorq -pub fn _mm256_xor_epi64(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_xor_epi64(a: __m256i, b: __m256i) -> __m256i { unsafe { transmute(simd_xor(a.as_i64x4(), b.as_i64x4())) } } @@ -28659,7 +29967,8 @@ pub fn _mm256_xor_epi64(a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpxorq))] -pub fn _mm256_mask_xor_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_xor_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let xor = _mm256_xor_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, xor, src.as_i64x4())) @@ -28673,7 +29982,8 @@ pub fn _mm256_mask_xor_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpxorq))] -pub fn _mm256_maskz_xor_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_xor_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let xor = _mm256_xor_epi64(a, b).as_i64x4(); transmute(simd_select_bitmask(k, xor, i64x4::ZERO)) @@ -28687,7 +29997,8 @@ pub fn _mm256_maskz_xor_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vxor))] //should be vpxorq -pub fn _mm_xor_epi64(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_xor_epi64(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_xor(a.as_i64x2(), b.as_i64x2())) } } @@ -28698,7 +30009,8 @@ pub fn 
_mm_xor_epi64(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpxorq))] -pub fn _mm_mask_xor_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_xor_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let xor = _mm_xor_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, xor, src.as_i64x2())) @@ -28712,7 +30024,8 @@ pub fn _mm_mask_xor_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpxorq))] -pub fn _mm_maskz_xor_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_xor_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let xor = _mm_xor_epi64(a, b).as_i64x2(); transmute(simd_select_bitmask(k, xor, i64x2::ZERO)) @@ -28726,7 +30039,8 @@ pub fn _mm_maskz_xor_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpxorq))] -pub fn _mm512_xor_si512(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_xor_si512(a: __m512i, b: __m512i) -> __m512i { unsafe { transmute(simd_xor(a.as_i32x16(), b.as_i32x16())) } } @@ -28737,7 +30051,8 @@ pub fn _mm512_xor_si512(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandnq))] //should be vpandnd -pub fn _mm512_andnot_epi32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_andnot_epi32(a: __m512i, b: __m512i) -> __m512i { _mm512_and_epi32(_mm512_xor_epi32(a, _mm512_set1_epi32(u32::MAX as i32)), b) } @@ -28748,7 +30063,13 @@ pub fn _mm512_andnot_epi32(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandnd))] -pub fn _mm512_mask_andnot_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_andnot_epi32( + src: __m512i, + k: __mmask16, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { let andnot = _mm512_andnot_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, andnot, src.as_i32x16())) @@ -28762,7 +30083,8 @@ pub fn _mm512_mask_andnot_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m51 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandnd))] -pub fn _mm512_maskz_andnot_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_andnot_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { unsafe { let andnot = _mm512_andnot_epi32(a, b).as_i32x16(); transmute(simd_select_bitmask(k, andnot, i32x16::ZERO)) @@ -28776,7 +30098,13 @@ pub fn _mm512_maskz_andnot_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512 
#[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandnd))] -pub fn _mm256_mask_andnot_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_andnot_epi32( + src: __m256i, + k: __mmask8, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { let not = _mm256_xor_epi32(a, _mm256_set1_epi32(u32::MAX as i32)); let andnot = simd_and(not.as_i32x8(), b.as_i32x8()); @@ -28791,7 +30119,8 @@ pub fn _mm256_mask_andnot_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandnd))] -pub fn _mm256_maskz_andnot_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_andnot_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let not = _mm256_xor_epi32(a, _mm256_set1_epi32(u32::MAX as i32)); let andnot = simd_and(not.as_i32x8(), b.as_i32x8()); @@ -28806,7 +30135,8 @@ pub fn _mm256_maskz_andnot_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandnd))] -pub fn _mm_mask_andnot_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_andnot_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let not = _mm_xor_epi32(a, _mm_set1_epi32(u32::MAX as i32)); let andnot = simd_and(not.as_i32x4(), b.as_i32x4()); @@ -28821,7 +30151,8 @@ pub fn _mm_mask_andnot_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandnd))] -pub fn _mm_maskz_andnot_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_andnot_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let not = _mm_xor_epi32(a, _mm_set1_epi32(u32::MAX as i32)); let andnot = simd_and(not.as_i32x4(), b.as_i32x4()); @@ -28836,7 +30167,8 @@ pub fn _mm_maskz_andnot_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandnq))] //should be vpandnd -pub fn _mm512_andnot_epi64(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_andnot_epi64(a: __m512i, b: __m512i) -> __m512i { _mm512_and_epi64(_mm512_xor_epi64(a, _mm512_set1_epi64(u64::MAX as i64)), b) } @@ -28847,7 +30179,13 @@ pub fn _mm512_andnot_epi64(a: __m512i, b: __m512i) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandnq))] -pub fn _mm512_mask_andnot_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_andnot_epi64( + src: __m512i, + k: __mmask8, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { let andnot = _mm512_andnot_epi64(a, 
b).as_i64x8(); transmute(simd_select_bitmask(k, andnot, src.as_i64x8())) @@ -28861,7 +30199,8 @@ pub fn _mm512_mask_andnot_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandnq))] -pub fn _mm512_maskz_andnot_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_andnot_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { unsafe { let andnot = _mm512_andnot_epi64(a, b).as_i64x8(); transmute(simd_select_bitmask(k, andnot, i64x8::ZERO)) @@ -28875,7 +30214,13 @@ pub fn _mm512_maskz_andnot_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandnq))] -pub fn _mm256_mask_andnot_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_andnot_epi64( + src: __m256i, + k: __mmask8, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { let not = _mm256_xor_epi64(a, _mm256_set1_epi64x(u64::MAX as i64)); let andnot = simd_and(not.as_i64x4(), b.as_i64x4()); @@ -28890,7 +30235,8 @@ pub fn _mm256_mask_andnot_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256 #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandnq))] -pub fn _mm256_maskz_andnot_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_andnot_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { unsafe { let not = _mm256_xor_epi64(a, _mm256_set1_epi64x(u64::MAX as i64)); let andnot = simd_and(not.as_i64x4(), b.as_i64x4()); @@ -28905,7 +30251,8 @@ pub fn _mm256_maskz_andnot_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandnq))] -pub fn _mm_mask_andnot_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_andnot_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let not = _mm_xor_epi64(a, _mm_set1_epi64x(u64::MAX as i64)); let andnot = simd_and(not.as_i64x2(), b.as_i64x2()); @@ -28920,7 +30267,8 @@ pub fn _mm_mask_andnot_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandnq))] -pub fn _mm_maskz_andnot_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_andnot_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { unsafe { let not = _mm_xor_epi64(a, _mm_set1_epi64x(u64::MAX as i64)); let andnot = simd_and(not.as_i64x2(), b.as_i64x2()); @@ -28935,7 +30283,8 @@ pub fn _mm_maskz_andnot_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpandnq))] -pub fn _mm512_andnot_si512(a: __m512i, b: __m512i) -> __m512i { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_andnot_si512(a: __m512i, b: __m512i) -> __m512i { _mm512_and_epi64(_mm512_xor_epi64(a, _mm512_set1_epi64(u64::MAX as i64)), b) } @@ -28945,7 +30294,8 @@ pub fn _mm512_andnot_si512(a: __m512i, b: __m512i) -> __m512i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _cvtmask16_u32(a: __mmask16) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _cvtmask16_u32(a: __mmask16) -> u32 { a as u32 } @@ -28955,7 +30305,8 @@ pub fn _cvtmask16_u32(a: __mmask16) -> u32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _cvtu32_mask16(a: u32) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _cvtu32_mask16(a: u32) -> __mmask16 { a as __mmask16 } @@ -28966,7 +30317,8 @@ pub fn _cvtu32_mask16(a: u32) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(and))] // generate normal and code instead of kandw -pub fn _kand_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kand_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { a & b } @@ -28977,7 +30329,8 @@ pub fn _kand_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(and))] // generate normal and code instead of kandw -pub fn _mm512_kand(a: __mmask16, b: __mmask16) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_kand(a: __mmask16, b: __mmask16) -> __mmask16 { a & b } @@ -28988,7 +30341,8 @@ pub fn _mm512_kand(a: __mmask16, b: __mmask16) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(or))] // generate normal or code instead of korw -pub fn _kor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { a | b } @@ -28999,7 +30353,8 @@ pub fn _kor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(or))] // generate normal or code instead of korw -pub fn _mm512_kor(a: __mmask16, b: __mmask16) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_kor(a: __mmask16, b: __mmask16) -> __mmask16 { a | b } @@ -29010,7 +30365,8 @@ pub fn _mm512_kor(a: __mmask16, b: __mmask16) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(xor))] // generate normal xor code instead of kxorw -pub fn _kxor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kxor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { a ^ b } @@ -29021,7 +30377,8 @@ pub fn _kxor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(xor))] 
// generate normal xor code instead of kxorw -pub fn _mm512_kxor(a: __mmask16, b: __mmask16) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_kxor(a: __mmask16, b: __mmask16) -> __mmask16 { a ^ b } @@ -29031,7 +30388,8 @@ pub fn _mm512_kxor(a: __mmask16, b: __mmask16) -> __mmask16 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _knot_mask16(a: __mmask16) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _knot_mask16(a: __mmask16) -> __mmask16 { a ^ 0b11111111_11111111 } @@ -29041,7 +30399,8 @@ pub fn _knot_mask16(a: __mmask16) -> __mmask16 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_knot(a: __mmask16) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_knot(a: __mmask16) -> __mmask16 { a ^ 0b11111111_11111111 } @@ -29052,7 +30411,8 @@ pub fn _mm512_knot(a: __mmask16) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(not))] // generate normal and, not code instead of kandnw -pub fn _kandn_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kandn_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { _mm512_kand(_mm512_knot(a), b) } @@ -29063,7 +30423,8 @@ pub fn _kandn_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(not))] // generate normal and code instead of kandw -pub fn _mm512_kandn(a: __mmask16, b: __mmask16) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_kandn(a: __mmask16, b: __mmask16) -> __mmask16 { _mm512_kand(_mm512_knot(a), b) } @@ -29074,7 +30435,8 @@ pub fn _mm512_kandn(a: __mmask16, b: __mmask16) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(xor))] // generate normal xor, not code instead of kxnorw -pub fn _kxnor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kxnor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { _mm512_knot(_mm512_kxor(a, b)) } @@ -29085,7 +30447,8 @@ pub fn _kxnor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(xor))] // generate normal and code instead of kandw -pub fn _mm512_kxnor(a: __mmask16, b: __mmask16) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_kxnor(a: __mmask16, b: __mmask16) -> __mmask16 { _mm512_knot(_mm512_kxor(a, b)) } @@ -29096,7 +30459,8 @@ pub fn _mm512_kxnor(a: __mmask16, b: __mmask16) -> __mmask16 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _kortest_mask16_u8(a: __mmask16, b: __mmask16, all_ones: *mut u8) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _kortest_mask16_u8(a: __mmask16, b: __mmask16, all_ones: *mut u8) -> u8 { let tmp = _kor_mask16(a, b); 
*all_ones = (tmp == 0xffff) as u8; (tmp == 0) as u8 @@ -29109,7 +30473,8 @@ pub unsafe fn _kortest_mask16_u8(a: __mmask16, b: __mmask16, all_ones: *mut u8) #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kortestc_mask16_u8(a: __mmask16, b: __mmask16) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kortestc_mask16_u8(a: __mmask16, b: __mmask16) -> u8 { (_kor_mask16(a, b) == 0xffff) as u8 } @@ -29120,7 +30485,8 @@ pub fn _kortestc_mask16_u8(a: __mmask16, b: __mmask16) -> u8 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kortestz_mask16_u8(a: __mmask16, b: __mmask16) -> u8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kortestz_mask16_u8(a: __mmask16, b: __mmask16) -> u8 { (_kor_mask16(a, b) == 0) as u8 } @@ -29131,7 +30497,8 @@ pub fn _kortestz_mask16_u8(a: __mmask16, b: __mmask16) -> u8 { #[target_feature(enable = "avx512f")] #[rustc_legacy_const_generics(1)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kshiftli_mask16(a: __mmask16) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kshiftli_mask16(a: __mmask16) -> __mmask16 { a.unbounded_shl(COUNT) } @@ -29142,7 +30509,8 @@ pub fn _kshiftli_mask16(a: __mmask16) -> __mmask16 { #[target_feature(enable = "avx512f")] #[rustc_legacy_const_generics(1)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _kshiftri_mask16(a: __mmask16) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _kshiftri_mask16(a: __mmask16) -> __mmask16 { a.unbounded_shr(COUNT) } @@ -29152,7 +30520,8 @@ pub fn _kshiftri_mask16(a: __mmask16) -> __mmask16 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _load_mask16(mem_addr: *const __mmask16) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _load_mask16(mem_addr: *const __mmask16) -> __mmask16 { *mem_addr } @@ -29162,7 +30531,8 @@ pub unsafe fn _load_mask16(mem_addr: *const __mmask16) -> __mmask16 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _store_mask16(mem_addr: *mut __mmask16, a: __mmask16) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _store_mask16(mem_addr: *mut __mmask16, a: __mmask16) { *mem_addr = a; } @@ -29173,7 +30543,8 @@ pub unsafe fn _store_mask16(mem_addr: *mut __mmask16, a: __mmask16) { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(mov))] // generate normal and code instead of kmovw -pub fn _mm512_kmov(a: __mmask16) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_kmov(a: __mmask16) -> __mmask16 { a } @@ -29183,7 +30554,8 @@ pub fn _mm512_kmov(a: __mmask16) -> __mmask16 { #[inline] #[target_feature(enable = "avx512f")] // generate normal and code instead of kmovw #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_int2mask(mask: i32) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_int2mask(mask: i32) -> __mmask16 { mask as u16 } @@ -29194,7 +30566,8 @@ pub fn 
_mm512_int2mask(mask: i32) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(mov))] // generate normal and code instead of kmovw -pub fn _mm512_mask2int(k1: __mmask16) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask2int(k1: __mmask16) -> i32 { k1 as i32 } @@ -29205,7 +30578,8 @@ pub fn _mm512_mask2int(k1: __mmask16) -> i32 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(mov))] // generate normal and code instead of kunpckbw -pub fn _mm512_kunpackb(a: __mmask16, b: __mmask16) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_kunpackb(a: __mmask16, b: __mmask16) -> __mmask16 { ((a & 0xff) << 8) | (b & 0xff) } @@ -29216,7 +30590,8 @@ pub fn _mm512_kunpackb(a: __mmask16, b: __mmask16) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(cmp))] // generate normal and code instead of kortestw -pub fn _mm512_kortestc(a: __mmask16, b: __mmask16) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_kortestc(a: __mmask16, b: __mmask16) -> i32 { let r = (a | b) == 0b11111111_11111111; r as i32 } @@ -29228,7 +30603,8 @@ pub fn _mm512_kortestc(a: __mmask16, b: __mmask16) -> i32 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(xor))] // generate normal and code instead of kortestw -pub fn _mm512_kortestz(a: __mmask16, b: __mmask16) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_kortestz(a: __mmask16, b: __mmask16) -> i32 { let r = (a | b) == 0; r as i32 } @@ -29240,7 +30616,8 @@ pub fn _mm512_kortestz(a: __mmask16, b: __mmask16) -> i32 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmd))] -pub fn _mm512_test_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_test_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { let and = _mm512_and_epi32(a, b); let zero = _mm512_setzero_si512(); _mm512_cmpneq_epi32_mask(and, zero) @@ -29253,7 +30630,8 @@ pub fn _mm512_test_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmd))] -pub fn _mm512_mask_test_epi32_mask(k: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_test_epi32_mask(k: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { let and = _mm512_and_epi32(a, b); let zero = _mm512_setzero_si512(); _mm512_mask_cmpneq_epi32_mask(k, and, zero) @@ -29266,7 +30644,8 @@ pub fn _mm512_mask_test_epi32_mask(k: __mmask16, a: __m512i, b: __m512i) -> __mm #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmd))] -pub fn _mm256_test_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_test_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { let 
and = _mm256_and_si256(a, b); let zero = _mm256_setzero_si256(); _mm256_cmpneq_epi32_mask(and, zero) @@ -29279,7 +30658,8 @@ pub fn _mm256_test_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmd))] -pub fn _mm256_mask_test_epi32_mask(k: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_test_epi32_mask(k: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { let and = _mm256_and_si256(a, b); let zero = _mm256_setzero_si256(); _mm256_mask_cmpneq_epi32_mask(k, and, zero) @@ -29292,7 +30672,8 @@ pub fn _mm256_mask_test_epi32_mask(k: __mmask8, a: __m256i, b: __m256i) -> __mma #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmd))] -pub fn _mm_test_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_test_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); let zero = _mm_setzero_si128(); _mm_cmpneq_epi32_mask(and, zero) @@ -29305,7 +30686,8 @@ pub fn _mm_test_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmd))] -pub fn _mm_mask_test_epi32_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_test_epi32_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); let zero = _mm_setzero_si128(); _mm_mask_cmpneq_epi32_mask(k, and, zero) @@ -29318,7 +30700,8 @@ pub fn _mm_mask_test_epi32_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmq))] -pub fn _mm512_test_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_test_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { let and = _mm512_and_epi64(a, b); let zero = _mm512_setzero_si512(); _mm512_cmpneq_epi64_mask(and, zero) @@ -29331,7 +30714,8 @@ pub fn _mm512_test_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmq))] -pub fn _mm512_mask_test_epi64_mask(k: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_test_epi64_mask(k: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { let and = _mm512_and_epi64(a, b); let zero = _mm512_setzero_si512(); _mm512_mask_cmpneq_epi64_mask(k, and, zero) @@ -29344,7 +30728,8 @@ pub fn _mm512_mask_test_epi64_mask(k: __mmask8, a: __m512i, b: __m512i) -> __mma #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmq))] -pub fn _mm256_test_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_test_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { let and = _mm256_and_si256(a, b); let zero = 
_mm256_setzero_si256(); _mm256_cmpneq_epi64_mask(and, zero) @@ -29357,7 +30742,8 @@ pub fn _mm256_test_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmq))] -pub fn _mm256_mask_test_epi64_mask(k: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_test_epi64_mask(k: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { let and = _mm256_and_si256(a, b); let zero = _mm256_setzero_si256(); _mm256_mask_cmpneq_epi64_mask(k, and, zero) @@ -29370,7 +30756,8 @@ pub fn _mm256_mask_test_epi64_mask(k: __mmask8, a: __m256i, b: __m256i) -> __mma #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmq))] -pub fn _mm_test_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_test_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); let zero = _mm_setzero_si128(); _mm_cmpneq_epi64_mask(and, zero) @@ -29383,7 +30770,8 @@ pub fn _mm_test_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestmq))] -pub fn _mm_mask_test_epi64_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_test_epi64_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); let zero = _mm_setzero_si128(); _mm_mask_cmpneq_epi64_mask(k, and, zero) @@ -29396,7 +30784,8 @@ pub fn _mm_mask_test_epi64_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmd))] -pub fn _mm512_testn_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_testn_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { let and = _mm512_and_epi32(a, b); let zero = _mm512_setzero_si512(); _mm512_cmpeq_epi32_mask(and, zero) @@ -29409,7 +30798,8 @@ pub fn _mm512_testn_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmd))] -pub fn _mm512_mask_testn_epi32_mask(k: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_testn_epi32_mask(k: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { let and = _mm512_and_epi32(a, b); let zero = _mm512_setzero_si512(); _mm512_mask_cmpeq_epi32_mask(k, and, zero) @@ -29422,7 +30812,8 @@ pub fn _mm512_mask_testn_epi32_mask(k: __mmask16, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmd))] -pub fn _mm256_testn_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_testn_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { let and = _mm256_and_si256(a, b); let zero = _mm256_setzero_si256(); 
_mm256_cmpeq_epi32_mask(and, zero) @@ -29435,7 +30826,8 @@ pub fn _mm256_testn_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmd))] -pub fn _mm256_mask_testn_epi32_mask(k: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_testn_epi32_mask(k: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { let and = _mm256_and_si256(a, b); let zero = _mm256_setzero_si256(); _mm256_mask_cmpeq_epi32_mask(k, and, zero) @@ -29448,7 +30840,8 @@ pub fn _mm256_mask_testn_epi32_mask(k: __mmask8, a: __m256i, b: __m256i) -> __mm #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmd))] -pub fn _mm_testn_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_testn_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); let zero = _mm_setzero_si128(); _mm_cmpeq_epi32_mask(and, zero) @@ -29461,7 +30854,8 @@ pub fn _mm_testn_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmd))] -pub fn _mm_mask_testn_epi32_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_testn_epi32_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); let zero = _mm_setzero_si128(); _mm_mask_cmpeq_epi32_mask(k, and, zero) @@ -29474,7 +30868,8 @@ pub fn _mm_mask_testn_epi32_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmq))] -pub fn _mm512_testn_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_testn_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { let and = _mm512_and_epi64(a, b); let zero = _mm512_setzero_si512(); _mm512_cmpeq_epi64_mask(and, zero) @@ -29487,7 +30882,8 @@ pub fn _mm512_testn_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmq))] -pub fn _mm512_mask_testn_epi64_mask(k: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_testn_epi64_mask(k: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { let and = _mm512_and_epi64(a, b); let zero = _mm512_setzero_si512(); _mm512_mask_cmpeq_epi64_mask(k, and, zero) @@ -29500,7 +30896,8 @@ pub fn _mm512_mask_testn_epi64_mask(k: __mmask8, a: __m512i, b: __m512i) -> __mm #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmq))] -pub fn _mm256_testn_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_testn_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { let and = _mm256_and_si256(a, b); let zero = _mm256_setzero_si256(); _mm256_cmpeq_epi64_mask(and, zero) @@ -29513,7 
+30910,8 @@ pub fn _mm256_testn_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmq))] -pub fn _mm256_mask_testn_epi64_mask(k: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_testn_epi64_mask(k: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { let and = _mm256_and_si256(a, b); let zero = _mm256_setzero_si256(); _mm256_mask_cmpeq_epi64_mask(k, and, zero) @@ -29526,7 +30924,8 @@ pub fn _mm256_mask_testn_epi64_mask(k: __mmask8, a: __m256i, b: __m256i) -> __mm #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmq))] -pub fn _mm_testn_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_testn_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); let zero = _mm_setzero_si128(); _mm_cmpeq_epi64_mask(and, zero) @@ -29539,7 +30938,8 @@ pub fn _mm_testn_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vptestnmq))] -pub fn _mm_mask_testn_epi64_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_testn_epi64_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); let zero = _mm_setzero_si128(); _mm_mask_cmpeq_epi64_mask(k, and, zero) @@ -29651,7 +31051,8 @@ pub unsafe fn _mm512_stream_load_si512(mem_addr: *const __m512i) -> __m512i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_set_ps( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_set_ps( e0: f32, e1: f32, e2: f32, @@ -29681,7 +31082,8 @@ pub fn _mm512_set_ps( #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_setr_ps( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_setr_ps( e0: f32, e1: f32, e2: f32, @@ -29713,7 +31115,8 @@ pub fn _mm512_setr_ps( #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_set1_pd(a: f64) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_set1_pd(a: f64) -> __m512d { unsafe { transmute(f64x8::splat(a)) } } @@ -29723,7 +31126,8 @@ pub fn _mm512_set1_pd(a: f64) -> __m512d { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_set1_ps(a: f32) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_set1_ps(a: f32) -> __m512 { unsafe { transmute(f32x16::splat(a)) } } @@ -29733,7 +31137,8 @@ pub fn _mm512_set1_ps(a: f32) -> __m512 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_set_epi32( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_set_epi32( e15: i32, e14: i32, e13: i32, @@ -29762,7 +31167,8 @@ pub fn _mm512_set_epi32( #[inline] 
#[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_set1_epi8(a: i8) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_set1_epi8(a: i8) -> __m512i { unsafe { transmute(i8x64::splat(a)) } } @@ -29772,7 +31178,8 @@ pub fn _mm512_set1_epi8(a: i8) -> __m512i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_set1_epi16(a: i16) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_set1_epi16(a: i16) -> __m512i { unsafe { transmute(i16x32::splat(a)) } } @@ -29782,7 +31189,8 @@ pub fn _mm512_set1_epi16(a: i16) -> __m512i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_set1_epi32(a: i32) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_set1_epi32(a: i32) -> __m512i { unsafe { transmute(i32x16::splat(a)) } } @@ -29793,7 +31201,8 @@ pub fn _mm512_set1_epi32(a: i32) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastd))] -pub fn _mm512_mask_set1_epi32(src: __m512i, k: __mmask16, a: i32) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_set1_epi32(src: __m512i, k: __mmask16, a: i32) -> __m512i { unsafe { let r = _mm512_set1_epi32(a).as_i32x16(); transmute(simd_select_bitmask(k, r, src.as_i32x16())) @@ -29807,7 +31216,8 @@ pub fn _mm512_mask_set1_epi32(src: __m512i, k: __mmask16, a: i32) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastd))] -pub fn _mm512_maskz_set1_epi32(k: __mmask16, a: i32) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_set1_epi32(k: __mmask16, a: i32) -> __m512i { unsafe { let r = _mm512_set1_epi32(a).as_i32x16(); transmute(simd_select_bitmask(k, r, i32x16::ZERO)) @@ -29821,7 +31231,8 @@ pub fn _mm512_maskz_set1_epi32(k: __mmask16, a: i32) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastd))] -pub fn _mm256_mask_set1_epi32(src: __m256i, k: __mmask8, a: i32) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_set1_epi32(src: __m256i, k: __mmask8, a: i32) -> __m256i { unsafe { let r = _mm256_set1_epi32(a).as_i32x8(); transmute(simd_select_bitmask(k, r, src.as_i32x8())) @@ -29835,7 +31246,8 @@ pub fn _mm256_mask_set1_epi32(src: __m256i, k: __mmask8, a: i32) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastd))] -pub fn _mm256_maskz_set1_epi32(k: __mmask8, a: i32) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_set1_epi32(k: __mmask8, a: i32) -> __m256i { unsafe { let r = _mm256_set1_epi32(a).as_i32x8(); transmute(simd_select_bitmask(k, r, i32x8::ZERO)) @@ -29849,7 +31261,8 @@ pub fn _mm256_maskz_set1_epi32(k: __mmask8, a: i32) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] 
#[cfg_attr(test, assert_instr(vpbroadcastd))] -pub fn _mm_mask_set1_epi32(src: __m128i, k: __mmask8, a: i32) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_set1_epi32(src: __m128i, k: __mmask8, a: i32) -> __m128i { unsafe { let r = _mm_set1_epi32(a).as_i32x4(); transmute(simd_select_bitmask(k, r, src.as_i32x4())) @@ -29863,7 +31276,8 @@ pub fn _mm_mask_set1_epi32(src: __m128i, k: __mmask8, a: i32) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastd))] -pub fn _mm_maskz_set1_epi32(k: __mmask8, a: i32) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_set1_epi32(k: __mmask8, a: i32) -> __m128i { unsafe { let r = _mm_set1_epi32(a).as_i32x4(); transmute(simd_select_bitmask(k, r, i32x4::ZERO)) @@ -29876,7 +31290,8 @@ pub fn _mm_maskz_set1_epi32(k: __mmask8, a: i32) -> __m128i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_set1_epi64(a: i64) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_set1_epi64(a: i64) -> __m512i { unsafe { transmute(i64x8::splat(a)) } } @@ -29887,7 +31302,8 @@ pub fn _mm512_set1_epi64(a: i64) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastq))] -pub fn _mm512_mask_set1_epi64(src: __m512i, k: __mmask8, a: i64) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_set1_epi64(src: __m512i, k: __mmask8, a: i64) -> __m512i { unsafe { let r = _mm512_set1_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, r, src.as_i64x8())) @@ -29901,7 +31317,8 @@ pub fn _mm512_mask_set1_epi64(src: __m512i, k: __mmask8, a: i64) -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastq))] -pub fn _mm512_maskz_set1_epi64(k: __mmask8, a: i64) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_set1_epi64(k: __mmask8, a: i64) -> __m512i { unsafe { let r = _mm512_set1_epi64(a).as_i64x8(); transmute(simd_select_bitmask(k, r, i64x8::ZERO)) @@ -29915,7 +31332,8 @@ pub fn _mm512_maskz_set1_epi64(k: __mmask8, a: i64) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastq))] -pub fn _mm256_mask_set1_epi64(src: __m256i, k: __mmask8, a: i64) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_set1_epi64(src: __m256i, k: __mmask8, a: i64) -> __m256i { unsafe { let r = _mm256_set1_epi64x(a).as_i64x4(); transmute(simd_select_bitmask(k, r, src.as_i64x4())) @@ -29929,7 +31347,8 @@ pub fn _mm256_mask_set1_epi64(src: __m256i, k: __mmask8, a: i64) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastq))] -pub fn _mm256_maskz_set1_epi64(k: __mmask8, a: i64) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_set1_epi64(k: __mmask8, a: i64) -> __m256i { unsafe { let r = _mm256_set1_epi64x(a).as_i64x4(); 
transmute(simd_select_bitmask(k, r, i64x4::ZERO)) @@ -29943,7 +31362,8 @@ pub fn _mm256_maskz_set1_epi64(k: __mmask8, a: i64) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastq))] -pub fn _mm_mask_set1_epi64(src: __m128i, k: __mmask8, a: i64) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_set1_epi64(src: __m128i, k: __mmask8, a: i64) -> __m128i { unsafe { let r = _mm_set1_epi64x(a).as_i64x2(); transmute(simd_select_bitmask(k, r, src.as_i64x2())) @@ -29957,7 +31377,8 @@ pub fn _mm_mask_set1_epi64(src: __m128i, k: __mmask8, a: i64) -> __m128i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpbroadcastq))] -pub fn _mm_maskz_set1_epi64(k: __mmask8, a: i64) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_set1_epi64(k: __mmask8, a: i64) -> __m128i { unsafe { let r = _mm_set1_epi64x(a).as_i64x2(); transmute(simd_select_bitmask(k, r, i64x2::ZERO)) @@ -29970,7 +31391,8 @@ pub fn _mm_maskz_set1_epi64(k: __mmask8, a: i64) -> __m128i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_set4_epi64(d: i64, c: i64, b: i64, a: i64) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_set4_epi64(d: i64, c: i64, b: i64, a: i64) -> __m512i { _mm512_set_epi64(d, c, b, a, d, c, b, a) } @@ -29980,7 +31402,8 @@ pub fn _mm512_set4_epi64(d: i64, c: i64, b: i64, a: i64) -> __m512i { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_setr4_epi64(d: i64, c: i64, b: i64, a: i64) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_setr4_epi64(d: i64, c: i64, b: i64, a: i64) -> __m512i { _mm512_set_epi64(a, b, c, d, a, b, c, d) } @@ -30809,7 +32232,8 @@ pub fn _mm_mask_cmp_round_sd_mask( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm512_cmplt_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmplt_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { unsafe { simd_bitmask::(simd_lt(a.as_u32x16(), b.as_u32x16())) } } @@ -30820,7 +32244,8 @@ pub fn _mm512_cmplt_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm512_mask_cmplt_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmplt_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_mask_cmp_epu32_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -30831,7 +32256,8 @@ pub fn _mm512_mask_cmplt_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm256_cmplt_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = 
"stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmplt_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::(simd_lt(a.as_u32x8(), b.as_u32x8())) } } @@ -30842,7 +32268,8 @@ pub fn _mm256_cmplt_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm256_mask_cmplt_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmplt_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epu32_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -30853,7 +32280,8 @@ pub fn _mm256_mask_cmplt_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm_cmplt_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmplt_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_lt(a.as_u32x4(), b.as_u32x4())) } } @@ -30864,7 +32292,8 @@ pub fn _mm_cmplt_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm_mask_cmplt_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmplt_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epu32_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -30875,7 +32304,8 @@ pub fn _mm_mask_cmplt_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm512_cmpgt_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpgt_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { unsafe { simd_bitmask::(simd_gt(a.as_u32x16(), b.as_u32x16())) } } @@ -30886,7 +32316,8 @@ pub fn _mm512_cmpgt_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm512_mask_cmpgt_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpgt_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_mask_cmp_epu32_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -30897,7 +32328,8 @@ pub fn _mm512_mask_cmpgt_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm256_cmpgt_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpgt_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::(simd_gt(a.as_u32x8(), b.as_u32x8())) } } @@ -30908,7 +32340,8 @@ 
pub fn _mm256_cmpgt_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm256_mask_cmpgt_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpgt_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epu32_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -30919,7 +32352,8 @@ pub fn _mm256_mask_cmpgt_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm_cmpgt_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpgt_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_gt(a.as_u32x4(), b.as_u32x4())) } } @@ -30930,7 +32364,8 @@ pub fn _mm_cmpgt_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm_mask_cmpgt_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpgt_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epu32_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -30941,7 +32376,8 @@ pub fn _mm_mask_cmpgt_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm512_cmple_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmple_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { unsafe { simd_bitmask::(simd_le(a.as_u32x16(), b.as_u32x16())) } } @@ -30952,7 +32388,8 @@ pub fn _mm512_cmple_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm512_mask_cmple_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmple_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_mask_cmp_epu32_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -30963,7 +32400,8 @@ pub fn _mm512_mask_cmple_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm256_cmple_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmple_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::(simd_le(a.as_u32x8(), b.as_u32x8())) } } @@ -30974,7 +32412,8 @@ pub fn _mm256_cmple_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, 
assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm256_mask_cmple_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmple_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epu32_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -30985,7 +32424,8 @@ pub fn _mm256_mask_cmple_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm_cmple_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmple_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_le(a.as_u32x4(), b.as_u32x4())) } } @@ -30996,7 +32436,8 @@ pub fn _mm_cmple_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm_mask_cmple_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmple_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epu32_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -31007,7 +32448,8 @@ pub fn _mm_mask_cmple_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm512_cmpge_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpge_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { unsafe { simd_bitmask::(simd_ge(a.as_u32x16(), b.as_u32x16())) } } @@ -31018,7 +32460,8 @@ pub fn _mm512_cmpge_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm512_mask_cmpge_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpge_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_mask_cmp_epu32_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -31029,7 +32472,8 @@ pub fn _mm512_mask_cmpge_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm256_cmpge_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpge_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::(simd_ge(a.as_u32x8(), b.as_u32x8())) } } @@ -31040,7 +32484,8 @@ pub fn _mm256_cmpge_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm256_mask_cmpge_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] 
+pub const fn _mm256_mask_cmpge_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epu32_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -31051,7 +32496,8 @@ pub fn _mm256_mask_cmpge_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm_cmpge_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpge_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_ge(a.as_u32x4(), b.as_u32x4())) } } @@ -31062,7 +32508,8 @@ pub fn _mm_cmpge_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm_mask_cmpge_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpge_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epu32_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -31073,7 +32520,8 @@ pub fn _mm_mask_cmpge_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm512_cmpeq_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpeq_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { unsafe { simd_bitmask::(simd_eq(a.as_u32x16(), b.as_u32x16())) } } @@ -31084,7 +32532,8 @@ pub fn _mm512_cmpeq_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm512_mask_cmpeq_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpeq_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_mask_cmp_epu32_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -31095,7 +32544,8 @@ pub fn _mm512_mask_cmpeq_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm256_cmpeq_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpeq_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::(simd_eq(a.as_u32x8(), b.as_u32x8())) } } @@ -31106,7 +32556,8 @@ pub fn _mm256_cmpeq_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm256_mask_cmpeq_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpeq_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epu32_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -31117,7 +32568,8 @@ pub fn 
_mm256_mask_cmpeq_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm_cmpeq_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpeq_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_eq(a.as_u32x4(), b.as_u32x4())) } } @@ -31128,7 +32580,8 @@ pub fn _mm_cmpeq_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm_mask_cmpeq_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpeq_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epu32_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -31139,7 +32592,8 @@ pub fn _mm_mask_cmpeq_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm512_cmpneq_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpneq_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { unsafe { simd_bitmask::(simd_ne(a.as_u32x16(), b.as_u32x16())) } } @@ -31150,7 +32604,8 @@ pub fn _mm512_cmpneq_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm512_mask_cmpneq_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpneq_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_mask_cmp_epu32_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -31161,7 +32616,8 @@ pub fn _mm512_mask_cmpneq_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm256_cmpneq_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpneq_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::(simd_ne(a.as_u32x8(), b.as_u32x8())) } } @@ -31172,7 +32628,8 @@ pub fn _mm256_cmpneq_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm256_mask_cmpneq_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpneq_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epu32_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -31183,7 +32640,8 @@ pub fn _mm256_mask_cmpneq_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, 
assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm_cmpneq_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpneq_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_ne(a.as_u32x4(), b.as_u32x4())) } } @@ -31194,7 +32652,8 @@ pub fn _mm_cmpneq_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud -pub fn _mm_mask_cmpneq_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpneq_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epu32_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -31206,7 +32665,11 @@ pub fn _mm_mask_cmpneq_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mma #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm512_cmp_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmp_epu32_mask( + a: __m512i, + b: __m512i, +) -> __mmask16 { unsafe { static_assert_uimm_bits!(IMM3, 3); let a = a.as_u32x16(); @@ -31233,7 +32696,8 @@ pub fn _mm512_cmp_epu32_mask(a: __m512i, b: __m512i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm512_mask_cmp_epu32_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmp_epu32_mask( k1: __mmask16, a: __m512i, b: __m512i, @@ -31265,7 +32729,11 @@ pub fn _mm512_mask_cmp_epu32_mask( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm256_cmp_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmp_epu32_mask( + a: __m256i, + b: __m256i, +) -> __mmask8 { unsafe { static_assert_uimm_bits!(IMM3, 3); let a = a.as_u32x8(); @@ -31292,7 +32760,8 @@ pub fn _mm256_cmp_epu32_mask(a: __m256i, b: __m256i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm256_mask_cmp_epu32_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmp_epu32_mask( k1: __mmask8, a: __m256i, b: __m256i, @@ -31324,7 +32793,8 @@ pub fn _mm256_mask_cmp_epu32_mask( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm_cmp_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmp_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { static_assert_uimm_bits!(IMM3, 3); let a = a.as_u32x4(); @@ -31351,7 +32821,8 @@ pub fn _mm_cmp_epu32_mask(a: __m128i, b: __m128i) - #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm_mask_cmp_epu32_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn 
_mm_mask_cmp_epu32_mask( k1: __mmask8, a: __m128i, b: __m128i, @@ -31382,7 +32853,8 @@ pub fn _mm_mask_cmp_epu32_mask( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm512_cmplt_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmplt_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { unsafe { simd_bitmask::(simd_lt(a.as_i32x16(), b.as_i32x16())) } } @@ -31393,7 +32865,8 @@ pub fn _mm512_cmplt_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm512_mask_cmplt_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmplt_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_mask_cmp_epi32_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -31404,7 +32877,8 @@ pub fn _mm512_mask_cmplt_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm256_cmplt_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmplt_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::(simd_lt(a.as_i32x8(), b.as_i32x8())) } } @@ -31415,7 +32889,8 @@ pub fn _mm256_cmplt_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm256_mask_cmplt_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmplt_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epi32_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -31426,7 +32901,8 @@ pub fn _mm256_mask_cmplt_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm_cmplt_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmplt_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_lt(a.as_i32x4(), b.as_i32x4())) } } @@ -31437,7 +32913,8 @@ pub fn _mm_cmplt_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm_mask_cmplt_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmplt_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epi32_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -31448,7 +32925,8 @@ pub fn _mm_mask_cmplt_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = 
"1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm512_cmpgt_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpgt_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { unsafe { simd_bitmask::(simd_gt(a.as_i32x16(), b.as_i32x16())) } } @@ -31459,7 +32937,8 @@ pub fn _mm512_cmpgt_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm512_mask_cmpgt_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpgt_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_mask_cmp_epi32_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -31470,7 +32949,8 @@ pub fn _mm512_mask_cmpgt_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm256_cmpgt_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpgt_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::(simd_gt(a.as_i32x8(), b.as_i32x8())) } } @@ -31481,7 +32961,8 @@ pub fn _mm256_cmpgt_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm256_mask_cmpgt_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpgt_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epi32_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -31492,7 +32973,8 @@ pub fn _mm256_mask_cmpgt_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm_cmpgt_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpgt_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_gt(a.as_i32x4(), b.as_i32x4())) } } @@ -31503,7 +32985,8 @@ pub fn _mm_cmpgt_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm_mask_cmpgt_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpgt_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epi32_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -31514,7 +32997,8 @@ pub fn _mm_mask_cmpgt_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm512_cmple_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = 
"149298")] +pub const fn _mm512_cmple_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { unsafe { simd_bitmask::(simd_le(a.as_i32x16(), b.as_i32x16())) } } @@ -31525,7 +33009,8 @@ pub fn _mm512_cmple_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm512_mask_cmple_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmple_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_mask_cmp_epi32_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -31536,7 +33021,8 @@ pub fn _mm512_mask_cmple_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm256_cmple_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmple_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::(simd_le(a.as_i32x8(), b.as_i32x8())) } } @@ -31547,7 +33033,8 @@ pub fn _mm256_cmple_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm256_mask_cmple_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmple_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epi32_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -31558,7 +33045,8 @@ pub fn _mm256_mask_cmple_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm_cmple_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmple_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_le(a.as_i32x4(), b.as_i32x4())) } } @@ -31569,7 +33057,8 @@ pub fn _mm_cmple_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm_mask_cmple_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmple_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epi32_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -31580,7 +33069,8 @@ pub fn _mm_mask_cmple_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm512_cmpge_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpge_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { unsafe { simd_bitmask::(simd_ge(a.as_i32x16(), b.as_i32x16())) } } @@ -31591,7 +33081,8 @@ pub fn _mm512_cmpge_epi32_mask(a: 
__m512i, b: __m512i) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm512_mask_cmpge_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpge_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_mask_cmp_epi32_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -31602,7 +33093,8 @@ pub fn _mm512_mask_cmpge_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm256_cmpge_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpge_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::(simd_ge(a.as_i32x8(), b.as_i32x8())) } } @@ -31613,7 +33105,8 @@ pub fn _mm256_cmpge_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm256_mask_cmpge_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpge_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epi32_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -31624,7 +33117,8 @@ pub fn _mm256_mask_cmpge_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm_cmpge_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpge_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_ge(a.as_i32x4(), b.as_i32x4())) } } @@ -31635,7 +33129,8 @@ pub fn _mm_cmpge_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm_mask_cmpge_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpge_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epi32_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -31646,7 +33141,8 @@ pub fn _mm_mask_cmpge_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm512_cmpeq_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpeq_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { unsafe { simd_bitmask::(simd_eq(a.as_i32x16(), b.as_i32x16())) } } @@ -31657,7 +33153,8 @@ pub fn _mm512_cmpeq_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn 
_mm512_mask_cmpeq_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpeq_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_mask_cmp_epi32_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -31668,7 +33165,8 @@ pub fn _mm512_mask_cmpeq_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm256_cmpeq_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpeq_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::(simd_eq(a.as_i32x8(), b.as_i32x8())) } } @@ -31679,7 +33177,8 @@ pub fn _mm256_cmpeq_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm256_mask_cmpeq_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpeq_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epi32_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -31690,7 +33189,8 @@ pub fn _mm256_mask_cmpeq_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm_cmpeq_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpeq_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_eq(a.as_i32x4(), b.as_i32x4())) } } @@ -31701,7 +33201,8 @@ pub fn _mm_cmpeq_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm_mask_cmpeq_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpeq_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epi32_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -31712,7 +33213,8 @@ pub fn _mm_mask_cmpeq_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm512_cmpneq_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpneq_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { unsafe { simd_bitmask::(simd_ne(a.as_i32x16(), b.as_i32x16())) } } @@ -31723,7 +33225,8 @@ pub fn _mm512_cmpneq_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm512_mask_cmpneq_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpneq_epi32_mask(k1: 
__mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_mask_cmp_epi32_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -31734,7 +33237,8 @@ pub fn _mm512_mask_cmpneq_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> _ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm256_cmpneq_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpneq_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::(simd_ne(a.as_i32x8(), b.as_i32x8())) } } @@ -31745,7 +33249,8 @@ pub fn _mm256_cmpneq_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm256_mask_cmpneq_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpneq_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epi32_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -31756,7 +33261,8 @@ pub fn _mm256_mask_cmpneq_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm_cmpneq_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpneq_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::(simd_ne(a.as_i32x4(), b.as_i32x4())) } } @@ -31767,7 +33273,8 @@ pub fn _mm_cmpneq_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd -pub fn _mm_mask_cmpneq_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpneq_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epi32_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -31779,7 +33286,11 @@ pub fn _mm_mask_cmpneq_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mma #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm512_cmp_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmp_epi32_mask( + a: __m512i, + b: __m512i, +) -> __mmask16 { unsafe { static_assert_uimm_bits!(IMM3, 3); let a = a.as_i32x16(); @@ -31806,7 +33317,8 @@ pub fn _mm512_cmp_epi32_mask(a: __m512i, b: __m512i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm512_mask_cmp_epi32_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmp_epi32_mask( k1: __mmask16, a: __m512i, b: __m512i, @@ -31838,7 +33350,11 @@ pub fn _mm512_mask_cmp_epi32_mask( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm256_cmp_epi32_mask(a: __m256i, b: __m256i) -> 
__mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmp_epi32_mask( + a: __m256i, + b: __m256i, +) -> __mmask8 { unsafe { static_assert_uimm_bits!(IMM3, 3); let a = a.as_i32x8(); @@ -31865,7 +33381,8 @@ pub fn _mm256_cmp_epi32_mask(a: __m256i, b: __m256i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm256_mask_cmp_epi32_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmp_epi32_mask( k1: __mmask8, a: __m256i, b: __m256i, @@ -31897,7 +33414,8 @@ pub fn _mm256_mask_cmp_epi32_mask( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm_cmp_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmp_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { static_assert_uimm_bits!(IMM3, 3); let a = a.as_i32x4(); @@ -31924,7 +33442,8 @@ pub fn _mm_cmp_epi32_mask(a: __m128i, b: __m128i) - #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm_mask_cmp_epi32_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmp_epi32_mask( k1: __mmask8, a: __m128i, b: __m128i, @@ -31955,7 +33474,8 @@ pub fn _mm_mask_cmp_epi32_mask( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm512_cmplt_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmplt_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { unsafe { simd_bitmask::<__m512i, _>(simd_lt(a.as_u64x8(), b.as_u64x8())) } } @@ -31966,7 +33486,8 @@ pub fn _mm512_cmplt_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm512_mask_cmplt_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmplt_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_mask_cmp_epu64_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -31977,7 +33498,8 @@ pub fn _mm512_mask_cmplt_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm256_cmplt_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmplt_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::<__m256i, _>(simd_lt(a.as_u64x4(), b.as_u64x4())) } } @@ -31988,7 +33510,8 @@ pub fn _mm256_cmplt_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm256_mask_cmplt_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", 
issue = "149298")] +pub const fn _mm256_mask_cmplt_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epu64_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -31999,7 +33522,8 @@ pub fn _mm256_mask_cmplt_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm_cmplt_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmplt_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::<__m128i, _>(simd_lt(a.as_u64x2(), b.as_u64x2())) } } @@ -32010,7 +33534,8 @@ pub fn _mm_cmplt_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm_mask_cmplt_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmplt_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epu64_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -32021,7 +33546,8 @@ pub fn _mm_mask_cmplt_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm512_cmpgt_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpgt_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { unsafe { simd_bitmask::<__m512i, _>(simd_gt(a.as_u64x8(), b.as_u64x8())) } } @@ -32032,7 +33558,8 @@ pub fn _mm512_cmpgt_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm512_mask_cmpgt_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpgt_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_mask_cmp_epu64_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -32043,7 +33570,8 @@ pub fn _mm512_mask_cmpgt_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm256_cmpgt_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpgt_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::<__m256i, _>(simd_gt(a.as_u64x4(), b.as_u64x4())) } } @@ -32054,7 +33582,8 @@ pub fn _mm256_cmpgt_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm256_mask_cmpgt_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpgt_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epu64_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ 
-32065,7 +33594,8 @@ pub fn _mm256_mask_cmpgt_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm_cmpgt_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpgt_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::<__m128i, _>(simd_gt(a.as_u64x2(), b.as_u64x2())) } } @@ -32076,7 +33606,8 @@ pub fn _mm_cmpgt_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm_mask_cmpgt_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpgt_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epu64_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -32087,7 +33618,8 @@ pub fn _mm_mask_cmpgt_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm512_cmple_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmple_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { unsafe { simd_bitmask::<__m512i, _>(simd_le(a.as_u64x8(), b.as_u64x8())) } } @@ -32098,7 +33630,8 @@ pub fn _mm512_cmple_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm512_mask_cmple_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmple_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_mask_cmp_epu64_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -32109,7 +33642,8 @@ pub fn _mm512_mask_cmple_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm256_cmple_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmple_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::<__m256i, _>(simd_le(a.as_u64x4(), b.as_u64x4())) } } @@ -32120,7 +33654,8 @@ pub fn _mm256_cmple_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm256_mask_cmple_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmple_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epu64_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -32131,7 +33666,8 @@ pub fn _mm256_mask_cmple_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = 
"stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm_cmple_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmple_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::<__m128i, _>(simd_le(a.as_u64x2(), b.as_u64x2())) } } @@ -32142,7 +33678,8 @@ pub fn _mm_cmple_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm_mask_cmple_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmple_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epu64_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -32153,7 +33690,8 @@ pub fn _mm_mask_cmple_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm512_cmpge_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpge_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { unsafe { simd_bitmask::<__m512i, _>(simd_ge(a.as_u64x8(), b.as_u64x8())) } } @@ -32164,7 +33702,8 @@ pub fn _mm512_cmpge_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm512_mask_cmpge_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpge_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_mask_cmp_epu64_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -32175,7 +33714,8 @@ pub fn _mm512_mask_cmpge_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm256_cmpge_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpge_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::<__m256i, _>(simd_ge(a.as_u64x4(), b.as_u64x4())) } } @@ -32186,7 +33726,8 @@ pub fn _mm256_cmpge_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm256_mask_cmpge_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpge_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epu64_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -32197,7 +33738,8 @@ pub fn _mm256_mask_cmpge_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm_cmpge_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpge_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::<__m128i, _>(simd_ge(a.as_u64x2(), b.as_u64x2())) } } @@ -32208,7 +33750,8 @@ pub fn _mm_cmpge_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm_mask_cmpge_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpge_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epu64_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -32219,7 +33762,8 @@ pub fn _mm_mask_cmpge_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm512_cmpeq_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpeq_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { unsafe { simd_bitmask::<__m512i, _>(simd_eq(a.as_u64x8(), b.as_u64x8())) } } @@ -32230,7 +33774,8 @@ pub fn _mm512_cmpeq_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm512_mask_cmpeq_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpeq_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_mask_cmp_epu64_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -32241,7 +33786,8 @@ pub fn _mm512_mask_cmpeq_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm256_cmpeq_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpeq_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::<__m256i, _>(simd_eq(a.as_u64x4(), b.as_u64x4())) } } @@ -32252,7 +33798,8 @@ pub fn _mm256_cmpeq_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm256_mask_cmpeq_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpeq_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epu64_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -32263,7 +33810,8 @@ pub fn _mm256_mask_cmpeq_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm_cmpeq_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpeq_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { 
simd_bitmask::<__m128i, _>(simd_eq(a.as_u64x2(), b.as_u64x2())) } } @@ -32274,7 +33822,8 @@ pub fn _mm_cmpeq_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm_mask_cmpeq_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpeq_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epu64_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -32285,7 +33834,8 @@ pub fn _mm_mask_cmpeq_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm512_cmpneq_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpneq_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { unsafe { simd_bitmask::<__m512i, _>(simd_ne(a.as_u64x8(), b.as_u64x8())) } } @@ -32296,7 +33846,8 @@ pub fn _mm512_cmpneq_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm512_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_mask_cmp_epu64_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -32307,7 +33858,8 @@ pub fn _mm512_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm256_cmpneq_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpneq_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::<__m256i, _>(simd_ne(a.as_u64x4(), b.as_u64x4())) } } @@ -32318,7 +33870,8 @@ pub fn _mm256_cmpneq_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm256_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epu64_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -32329,7 +33882,8 @@ pub fn _mm256_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm_cmpneq_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpneq_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::<__m128i, _>(simd_ne(a.as_u64x2(), b.as_u64x2())) } } @@ -32340,7 +33894,8 @@ pub fn _mm_cmpneq_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { 
#[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq -pub fn _mm_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epu64_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -32352,7 +33907,11 @@ pub fn _mm_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mma #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm512_cmp_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmp_epu64_mask( + a: __m512i, + b: __m512i, +) -> __mmask8 { unsafe { static_assert_uimm_bits!(IMM3, 3); let a = a.as_u64x8(); @@ -32379,7 +33938,8 @@ pub fn _mm512_cmp_epu64_mask(a: __m512i, b: __m512i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm512_mask_cmp_epu64_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmp_epu64_mask( k1: __mmask8, a: __m512i, b: __m512i, @@ -32411,7 +33971,11 @@ pub fn _mm512_mask_cmp_epu64_mask( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm256_cmp_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmp_epu64_mask( + a: __m256i, + b: __m256i, +) -> __mmask8 { unsafe { static_assert_uimm_bits!(IMM3, 3); let a = a.as_u64x4(); @@ -32438,7 +34002,8 @@ pub fn _mm256_cmp_epu64_mask(a: __m256i, b: __m256i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm256_mask_cmp_epu64_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmp_epu64_mask( k1: __mmask8, a: __m256i, b: __m256i, @@ -32470,7 +34035,8 @@ pub fn _mm256_mask_cmp_epu64_mask( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm_cmp_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmp_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { static_assert_uimm_bits!(IMM3, 3); let a = a.as_u64x2(); @@ -32497,7 +34063,8 @@ pub fn _mm_cmp_epu64_mask(a: __m128i, b: __m128i) - #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm_mask_cmp_epu64_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmp_epu64_mask( k1: __mmask8, a: __m128i, b: __m128i, @@ -32528,7 +34095,8 @@ pub fn _mm_mask_cmp_epu64_mask( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm512_cmplt_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const 
fn _mm512_cmplt_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { unsafe { simd_bitmask::<__m512i, _>(simd_lt(a.as_i64x8(), b.as_i64x8())) } } @@ -32539,7 +34107,8 @@ pub fn _mm512_cmplt_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm512_mask_cmplt_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmplt_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_mask_cmp_epi64_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -32550,7 +34119,8 @@ pub fn _mm512_mask_cmplt_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm256_cmplt_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmplt_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::<__m256i, _>(simd_lt(a.as_i64x4(), b.as_i64x4())) } } @@ -32561,7 +34131,8 @@ pub fn _mm256_cmplt_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm256_mask_cmplt_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmplt_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epi64_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -32572,7 +34143,8 @@ pub fn _mm256_mask_cmplt_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm_cmplt_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmplt_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::<__m128i, _>(simd_lt(a.as_i64x2(), b.as_i64x2())) } } @@ -32583,7 +34155,8 @@ pub fn _mm_cmplt_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm_mask_cmplt_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmplt_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epi64_mask::<_MM_CMPINT_LT>(k1, a, b) } @@ -32594,7 +34167,8 @@ pub fn _mm_mask_cmplt_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm512_cmpgt_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpgt_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { unsafe { simd_bitmask::<__m512i, _>(simd_gt(a.as_i64x8(), b.as_i64x8())) } } @@ -32605,7 +34179,8 @@ pub fn 
_mm512_cmpgt_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm512_mask_cmpgt_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpgt_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_mask_cmp_epi64_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -32616,7 +34191,8 @@ pub fn _mm512_mask_cmpgt_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm256_cmpgt_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpgt_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::<__m256i, _>(simd_gt(a.as_i64x4(), b.as_i64x4())) } } @@ -32627,7 +34203,8 @@ pub fn _mm256_cmpgt_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm256_mask_cmpgt_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpgt_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epi64_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -32638,7 +34215,8 @@ pub fn _mm256_mask_cmpgt_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm_cmpgt_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpgt_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::<__m128i, _>(simd_gt(a.as_i64x2(), b.as_i64x2())) } } @@ -32649,7 +34227,8 @@ pub fn _mm_cmpgt_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm_mask_cmpgt_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpgt_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epi64_mask::<_MM_CMPINT_NLE>(k1, a, b) } @@ -32660,7 +34239,8 @@ pub fn _mm_mask_cmpgt_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm512_cmple_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmple_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { unsafe { simd_bitmask::<__m512i, _>(simd_le(a.as_i64x8(), b.as_i64x8())) } } @@ -32671,7 +34251,8 @@ pub fn _mm512_cmple_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, 
assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm512_mask_cmple_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmple_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_mask_cmp_epi64_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -32682,7 +34263,8 @@ pub fn _mm512_mask_cmple_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm256_cmple_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmple_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::<__m256i, _>(simd_le(a.as_i64x4(), b.as_i64x4())) } } @@ -32693,7 +34275,8 @@ pub fn _mm256_cmple_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm256_mask_cmple_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmple_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epi64_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -32704,7 +34287,8 @@ pub fn _mm256_mask_cmple_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm_cmple_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmple_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::<__m128i, _>(simd_le(a.as_i64x2(), b.as_i64x2())) } } @@ -32715,7 +34299,8 @@ pub fn _mm_cmple_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm_mask_cmple_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmple_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epi64_mask::<_MM_CMPINT_LE>(k1, a, b) } @@ -32726,7 +34311,8 @@ pub fn _mm_mask_cmple_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm512_cmpge_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpge_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { unsafe { simd_bitmask::<__m512i, _>(simd_ge(a.as_i64x8(), b.as_i64x8())) } } @@ -32737,7 +34323,8 @@ pub fn _mm512_cmpge_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm512_mask_cmpge_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", 
issue = "149298")] +pub const fn _mm512_mask_cmpge_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_mask_cmp_epi64_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -32748,7 +34335,8 @@ pub fn _mm512_mask_cmpge_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm256_cmpge_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpge_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::<__m256i, _>(simd_ge(a.as_i64x4(), b.as_i64x4())) } } @@ -32759,7 +34347,8 @@ pub fn _mm256_cmpge_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm256_mask_cmpge_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpge_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epi64_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -32770,7 +34359,8 @@ pub fn _mm256_mask_cmpge_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm_cmpge_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpge_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::<__m128i, _>(simd_ge(a.as_i64x2(), b.as_i64x2())) } } @@ -32781,7 +34371,8 @@ pub fn _mm_cmpge_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm_mask_cmpge_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpge_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epi64_mask::<_MM_CMPINT_NLT>(k1, a, b) } @@ -32792,7 +34383,8 @@ pub fn _mm_mask_cmpge_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm512_cmpeq_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpeq_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { unsafe { simd_bitmask::<__m512i, _>(simd_eq(a.as_i64x8(), b.as_i64x8())) } } @@ -32803,7 +34395,8 @@ pub fn _mm512_cmpeq_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm512_mask_cmpeq_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpeq_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_mask_cmp_epi64_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -32814,7 
+34407,8 @@ pub fn _mm512_mask_cmpeq_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm256_cmpeq_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpeq_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::<__m256i, _>(simd_eq(a.as_i64x4(), b.as_i64x4())) } } @@ -32825,7 +34419,8 @@ pub fn _mm256_cmpeq_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm256_mask_cmpeq_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpeq_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epi64_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -32836,7 +34431,8 @@ pub fn _mm256_mask_cmpeq_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm_cmpeq_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpeq_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::<__m128i, _>(simd_eq(a.as_i64x2(), b.as_i64x2())) } } @@ -32847,7 +34443,8 @@ pub fn _mm_cmpeq_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm_mask_cmpeq_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpeq_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epi64_mask::<_MM_CMPINT_EQ>(k1, a, b) } @@ -32858,7 +34455,8 @@ pub fn _mm_mask_cmpeq_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmas #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm512_cmpneq_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmpneq_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { unsafe { simd_bitmask::<__m512i, _>(simd_ne(a.as_i64x8(), b.as_i64x8())) } } @@ -32869,7 +34467,8 @@ pub fn _mm512_cmpneq_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm512_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_mask_cmp_epi64_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -32880,7 +34479,8 @@ pub fn _mm512_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", 
since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm256_cmpneq_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmpneq_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { unsafe { simd_bitmask::<__m256i, _>(simd_ne(a.as_i64x4(), b.as_i64x4())) } } @@ -32891,7 +34491,8 @@ pub fn _mm256_cmpneq_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm256_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_mask_cmp_epi64_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -32902,7 +34503,8 @@ pub fn _mm256_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __ #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm_cmpneq_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpneq_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { simd_bitmask::<__m128i, _>(simd_ne(a.as_i64x2(), b.as_i64x2())) } } @@ -32913,7 +34515,8 @@ pub fn _mm_cmpneq_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq -pub fn _mm_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_mask_cmp_epi64_mask::<_MM_CMPINT_NE>(k1, a, b) } @@ -32925,7 +34528,11 @@ pub fn _mm_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mma #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm512_cmp_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cmp_epi64_mask( + a: __m512i, + b: __m512i, +) -> __mmask8 { unsafe { static_assert_uimm_bits!(IMM3, 3); let a = a.as_i64x8(); @@ -32952,7 +34559,8 @@ pub fn _mm512_cmp_epi64_mask(a: __m512i, b: __m512i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm512_mask_cmp_epi64_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_cmp_epi64_mask( k1: __mmask8, a: __m512i, b: __m512i, @@ -32984,7 +34592,11 @@ pub fn _mm512_mask_cmp_epi64_mask( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm256_cmp_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cmp_epi64_mask( + a: __m256i, + b: __m256i, +) -> __mmask8 { unsafe { static_assert_uimm_bits!(IMM3, 3); let a = a.as_i64x4(); @@ -33011,7 +34623,8 @@ pub fn 
_mm256_cmp_epi64_mask(a: __m256i, b: __m256i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm256_mask_cmp_epi64_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_cmp_epi64_mask( k1: __mmask8, a: __m256i, b: __m256i, @@ -33043,7 +34656,8 @@ pub fn _mm256_mask_cmp_epi64_mask( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm_cmp_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmp_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { unsafe { static_assert_uimm_bits!(IMM3, 3); let a = a.as_i64x2(); @@ -33070,7 +34684,8 @@ pub fn _mm_cmp_epi64_mask(a: __m128i, b: __m128i) - #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] -pub fn _mm_mask_cmp_epi64_mask( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_cmp_epi64_mask( k1: __mmask8, a: __m128i, b: __m128i, @@ -33100,8 +34715,9 @@ pub fn _mm_mask_cmp_epi64_mask( #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_add_epi32(a: __m512i) -> i32 { - unsafe { simd_reduce_add_unordered(a.as_i32x16()) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_add_epi32(a: __m512i) -> i32 { + unsafe { simd_reduce_add_ordered(a.as_i32x16(), 0) } } /// Reduce the packed 32-bit integers in a by addition using mask k. Returns the sum of all active elements in a. @@ -33110,8 +34726,9 @@ pub fn _mm512_reduce_add_epi32(a: __m512i) -> i32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_add_epi32(k: __mmask16, a: __m512i) -> i32 { - unsafe { simd_reduce_add_unordered(simd_select_bitmask(k, a.as_i32x16(), i32x16::ZERO)) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_add_epi32(k: __mmask16, a: __m512i) -> i32 { + unsafe { simd_reduce_add_ordered(simd_select_bitmask(k, a.as_i32x16(), i32x16::ZERO), 0) } } /// Reduce the packed 64-bit integers in a by addition. Returns the sum of all elements in a. @@ -33120,8 +34737,9 @@ pub fn _mm512_mask_reduce_add_epi32(k: __mmask16, a: __m512i) -> i32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_add_epi64(a: __m512i) -> i64 { - unsafe { simd_reduce_add_unordered(a.as_i64x8()) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_add_epi64(a: __m512i) -> i64 { + unsafe { simd_reduce_add_ordered(a.as_i64x8(), 0) } } /// Reduce the packed 64-bit integers in a by addition using mask k. Returns the sum of all active elements in a. 
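A note on the `_mm512_reduce_add_*` hunks just above: swapping `simd_reduce_add_unordered` for `simd_reduce_add_ordered(.., 0)` fixes the association order and supplies an explicit start value, which is presumably what const evaluation needs; for integer lanes the result is unchanged. A minimal scalar sketch of the ordered-fold semantics (the helper name is hypothetical, not part of the patch):

    // Ordered add-reduction: fold the lanes left to right starting from the
    // identity 0. For integers this matches the old unordered reduction;
    // only the evaluation order is now pinned down.
    const fn reduce_add_ordered_i32(lanes: [i32; 16]) -> i32 {
        let mut acc = 0i32;
        let mut i = 0;
        while i < lanes.len() {
            acc = acc.wrapping_add(lanes[i]);
            i += 1;
        }
        acc
    }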
@@ -33130,8 +34748,9 @@ pub fn _mm512_reduce_add_epi64(a: __m512i) -> i64 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_add_epi64(k: __mmask8, a: __m512i) -> i64 { - unsafe { simd_reduce_add_unordered(simd_select_bitmask(k, a.as_i64x8(), i64x8::ZERO)) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_add_epi64(k: __mmask8, a: __m512i) -> i64 { + unsafe { simd_reduce_add_ordered(simd_select_bitmask(k, a.as_i64x8(), i64x8::ZERO), 0) } } /// Reduce the packed single-precision (32-bit) floating-point elements in a by addition. Returns the sum of all elements in a. @@ -33140,7 +34759,8 @@ pub fn _mm512_mask_reduce_add_epi64(k: __mmask8, a: __m512i) -> i64 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_add_ps(a: __m512) -> f32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_add_ps(a: __m512) -> f32 { unsafe { // we have to use `simd_shuffle` here because `_mm512_extractf32x8_ps` is in AVX512DQ let a = _mm256_add_ps( @@ -33159,7 +34779,8 @@ pub fn _mm512_reduce_add_ps(a: __m512) -> f32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_add_ps(k: __mmask16, a: __m512) -> f32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_add_ps(k: __mmask16, a: __m512) -> f32 { unsafe { _mm512_reduce_add_ps(simd_select_bitmask(k, a, _mm512_setzero_ps())) } } @@ -33169,7 +34790,8 @@ pub fn _mm512_mask_reduce_add_ps(k: __mmask16, a: __m512) -> f32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_add_pd(a: __m512d) -> f64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_add_pd(a: __m512d) -> f64 { unsafe { let a = _mm256_add_pd( _mm512_extractf64x4_pd::<0>(a), @@ -33186,7 +34808,8 @@ pub fn _mm512_reduce_add_pd(a: __m512d) -> f64 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_add_pd(k: __mmask8, a: __m512d) -> f64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_add_pd(k: __mmask8, a: __m512d) -> f64 { unsafe { _mm512_reduce_add_pd(simd_select_bitmask(k, a, _mm512_setzero_pd())) } } @@ -33196,8 +34819,9 @@ pub fn _mm512_mask_reduce_add_pd(k: __mmask8, a: __m512d) -> f64 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_mul_epi32(a: __m512i) -> i32 { - unsafe { simd_reduce_mul_unordered(a.as_i32x16()) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_mul_epi32(a: __m512i) -> i32 { + unsafe { simd_reduce_mul_ordered(a.as_i32x16(), 1) } } /// Reduce the packed 32-bit integers in a by multiplication using mask k. Returns the product of all active elements in a. 
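The masked reductions above all follow one pattern: `simd_select_bitmask` substitutes the operation's identity element (0 for addition, 1 for multiplication) into lanes whose bit in `k` is clear, and the full-width reduction then runs over all lanes. A scalar sketch of that pattern (illustrative only; the helper name is not part of the patch):

    // Scalar model of a masked multiply-reduction: inactive lanes contribute
    // the identity 1, so only lanes selected by `k` affect the product.
    const fn mask_reduce_mul_i32(k: u16, lanes: [i32; 16]) -> i32 {
        let mut acc = 1i32;
        let mut i = 0;
        while i < 16 {
            let lane = if (k >> i) & 1 == 1 { lanes[i] } else { 1 };
            acc = acc.wrapping_mul(lane);
            i += 1;
        }
        acc
    }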
@@ -33206,13 +34830,13 @@ pub fn _mm512_reduce_mul_epi32(a: __m512i) -> i32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_mul_epi32(k: __mmask16, a: __m512i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_mul_epi32(k: __mmask16, a: __m512i) -> i32 { unsafe { - simd_reduce_mul_unordered(simd_select_bitmask( - k, - a.as_i32x16(), - _mm512_set1_epi32(1).as_i32x16(), - )) + simd_reduce_mul_ordered( + simd_select_bitmask(k, a.as_i32x16(), _mm512_set1_epi32(1).as_i32x16()), + 1, + ) } } @@ -33222,8 +34846,9 @@ pub fn _mm512_mask_reduce_mul_epi32(k: __mmask16, a: __m512i) -> i32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_mul_epi64(a: __m512i) -> i64 { - unsafe { simd_reduce_mul_unordered(a.as_i64x8()) } +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_mul_epi64(a: __m512i) -> i64 { + unsafe { simd_reduce_mul_ordered(a.as_i64x8(), 1) } } /// Reduce the packed 64-bit integers in a by multiplication using mask k. Returns the product of all active elements in a. @@ -33232,13 +34857,13 @@ pub fn _mm512_reduce_mul_epi64(a: __m512i) -> i64 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_mul_epi64(k: __mmask8, a: __m512i) -> i64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_mul_epi64(k: __mmask8, a: __m512i) -> i64 { unsafe { - simd_reduce_mul_unordered(simd_select_bitmask( - k, - a.as_i64x8(), - _mm512_set1_epi64(1).as_i64x8(), - )) + simd_reduce_mul_ordered( + simd_select_bitmask(k, a.as_i64x8(), _mm512_set1_epi64(1).as_i64x8()), + 1, + ) } } @@ -33248,7 +34873,8 @@ pub fn _mm512_mask_reduce_mul_epi64(k: __mmask8, a: __m512i) -> i64 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_mul_ps(a: __m512) -> f32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_mul_ps(a: __m512) -> f32 { unsafe { // we have to use `simd_shuffle` here because `_mm512_extractf32x8_ps` is in AVX512DQ let a = _mm256_mul_ps( @@ -33267,7 +34893,8 @@ pub fn _mm512_reduce_mul_ps(a: __m512) -> f32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_mul_ps(k: __mmask16, a: __m512) -> f32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_mul_ps(k: __mmask16, a: __m512) -> f32 { unsafe { _mm512_reduce_mul_ps(simd_select_bitmask(k, a, _mm512_set1_ps(1.))) } } @@ -33277,7 +34904,8 @@ pub fn _mm512_mask_reduce_mul_ps(k: __mmask16, a: __m512) -> f32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_mul_pd(a: __m512d) -> f64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_mul_pd(a: __m512d) -> f64 { unsafe { let a = _mm256_mul_pd( _mm512_extractf64x4_pd::<0>(a), @@ -33294,7 +34922,8 @@ pub fn _mm512_reduce_mul_pd(a: __m512d) -> f64 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_mul_pd(k: __mmask8, a: __m512d) -> 
f64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_mul_pd(k: __mmask8, a: __m512d) -> f64 { unsafe { _mm512_reduce_mul_pd(simd_select_bitmask(k, a, _mm512_set1_pd(1.))) } } @@ -33304,7 +34933,8 @@ pub fn _mm512_mask_reduce_mul_pd(k: __mmask8, a: __m512d) -> f64 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_max_epi32(a: __m512i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_max_epi32(a: __m512i) -> i32 { unsafe { simd_reduce_max(a.as_i32x16()) } } @@ -33314,7 +34944,8 @@ pub fn _mm512_reduce_max_epi32(a: __m512i) -> i32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_max_epi32(k: __mmask16, a: __m512i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_max_epi32(k: __mmask16, a: __m512i) -> i32 { unsafe { simd_reduce_max(simd_select_bitmask( k, @@ -33330,7 +34961,8 @@ pub fn _mm512_mask_reduce_max_epi32(k: __mmask16, a: __m512i) -> i32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_max_epi64(a: __m512i) -> i64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_max_epi64(a: __m512i) -> i64 { unsafe { simd_reduce_max(a.as_i64x8()) } } @@ -33340,7 +34972,8 @@ pub fn _mm512_reduce_max_epi64(a: __m512i) -> i64 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_max_epi64(k: __mmask8, a: __m512i) -> i64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_max_epi64(k: __mmask8, a: __m512i) -> i64 { unsafe { simd_reduce_max(simd_select_bitmask(k, a.as_i64x8(), i64x8::splat(i64::MIN))) } } @@ -33350,7 +34983,8 @@ pub fn _mm512_mask_reduce_max_epi64(k: __mmask8, a: __m512i) -> i64 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_max_epu32(a: __m512i) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_max_epu32(a: __m512i) -> u32 { unsafe { simd_reduce_max(a.as_u32x16()) } } @@ -33360,7 +34994,8 @@ pub fn _mm512_reduce_max_epu32(a: __m512i) -> u32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_max_epu32(k: __mmask16, a: __m512i) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_max_epu32(k: __mmask16, a: __m512i) -> u32 { unsafe { simd_reduce_max(simd_select_bitmask(k, a.as_u32x16(), u32x16::ZERO)) } } @@ -33370,7 +35005,8 @@ pub fn _mm512_mask_reduce_max_epu32(k: __mmask16, a: __m512i) -> u32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_max_epu64(a: __m512i) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_max_epu64(a: __m512i) -> u64 { unsafe { simd_reduce_max(a.as_u64x8()) } } @@ -33380,7 +35016,8 @@ pub fn _mm512_reduce_max_epu64(a: __m512i) -> u64 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", 
since = "1.89")] -pub fn _mm512_mask_reduce_max_epu64(k: __mmask8, a: __m512i) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_max_epu64(k: __mmask8, a: __m512i) -> u64 { unsafe { simd_reduce_max(simd_select_bitmask(k, a.as_u64x8(), u64x8::ZERO)) } } @@ -33445,7 +35082,8 @@ pub fn _mm512_mask_reduce_max_pd(k: __mmask8, a: __m512d) -> f64 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_min_epi32(a: __m512i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_min_epi32(a: __m512i) -> i32 { unsafe { simd_reduce_min(a.as_i32x16()) } } @@ -33455,7 +35093,8 @@ pub fn _mm512_reduce_min_epi32(a: __m512i) -> i32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_min_epi32(k: __mmask16, a: __m512i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_min_epi32(k: __mmask16, a: __m512i) -> i32 { unsafe { simd_reduce_min(simd_select_bitmask( k, @@ -33471,7 +35110,8 @@ pub fn _mm512_mask_reduce_min_epi32(k: __mmask16, a: __m512i) -> i32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_min_epi64(a: __m512i) -> i64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_min_epi64(a: __m512i) -> i64 { unsafe { simd_reduce_min(a.as_i64x8()) } } @@ -33481,7 +35121,8 @@ pub fn _mm512_reduce_min_epi64(a: __m512i) -> i64 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_min_epi64(k: __mmask8, a: __m512i) -> i64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_min_epi64(k: __mmask8, a: __m512i) -> i64 { unsafe { simd_reduce_min(simd_select_bitmask(k, a.as_i64x8(), i64x8::splat(i64::MAX))) } } @@ -33491,7 +35132,8 @@ pub fn _mm512_mask_reduce_min_epi64(k: __mmask8, a: __m512i) -> i64 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_min_epu32(a: __m512i) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_min_epu32(a: __m512i) -> u32 { unsafe { simd_reduce_min(a.as_u32x16()) } } @@ -33501,7 +35143,8 @@ pub fn _mm512_reduce_min_epu32(a: __m512i) -> u32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_min_epu32(k: __mmask16, a: __m512i) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_min_epu32(k: __mmask16, a: __m512i) -> u32 { unsafe { simd_reduce_min(simd_select_bitmask( k, @@ -33517,7 +35160,8 @@ pub fn _mm512_mask_reduce_min_epu32(k: __mmask16, a: __m512i) -> u32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_min_epu64(a: __m512i) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_min_epu64(a: __m512i) -> u64 { unsafe { simd_reduce_min(a.as_u64x8()) } } @@ -33527,7 +35171,8 @@ pub fn _mm512_reduce_min_epu64(a: __m512i) -> u64 { #[inline] #[target_feature(enable = 
"avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_min_epu64(k: __mmask8, a: __m512i) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_min_epu64(k: __mmask8, a: __m512i) -> u64 { unsafe { simd_reduce_min(simd_select_bitmask(k, a.as_u64x8(), u64x8::splat(u64::MAX))) } } @@ -33592,7 +35237,8 @@ pub fn _mm512_mask_reduce_min_pd(k: __mmask8, a: __m512d) -> f64 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_and_epi32(a: __m512i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_and_epi32(a: __m512i) -> i32 { unsafe { simd_reduce_and(a.as_i32x16()) } } @@ -33602,7 +35248,8 @@ pub fn _mm512_reduce_and_epi32(a: __m512i) -> i32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_and_epi32(k: __mmask16, a: __m512i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_and_epi32(k: __mmask16, a: __m512i) -> i32 { unsafe { simd_reduce_and(simd_select_bitmask(k, a.as_i32x16(), i32x16::splat(-1))) } } @@ -33612,7 +35259,8 @@ pub fn _mm512_mask_reduce_and_epi32(k: __mmask16, a: __m512i) -> i32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_and_epi64(a: __m512i) -> i64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_and_epi64(a: __m512i) -> i64 { unsafe { simd_reduce_and(a.as_i64x8()) } } @@ -33622,7 +35270,8 @@ pub fn _mm512_reduce_and_epi64(a: __m512i) -> i64 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_and_epi64(k: __mmask8, a: __m512i) -> i64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_and_epi64(k: __mmask8, a: __m512i) -> i64 { unsafe { simd_reduce_and(simd_select_bitmask(k, a.as_i64x8(), i64x8::splat(-1))) } } @@ -33632,7 +35281,8 @@ pub fn _mm512_mask_reduce_and_epi64(k: __mmask8, a: __m512i) -> i64 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_or_epi32(a: __m512i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_or_epi32(a: __m512i) -> i32 { unsafe { simd_reduce_or(a.as_i32x16()) } } @@ -33642,7 +35292,8 @@ pub fn _mm512_reduce_or_epi32(a: __m512i) -> i32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_or_epi32(k: __mmask16, a: __m512i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_or_epi32(k: __mmask16, a: __m512i) -> i32 { unsafe { simd_reduce_or(simd_select_bitmask(k, a.as_i32x16(), i32x16::ZERO)) } } @@ -33652,7 +35303,8 @@ pub fn _mm512_mask_reduce_or_epi32(k: __mmask16, a: __m512i) -> i32 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_reduce_or_epi64(a: __m512i) -> i64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_or_epi64(a: __m512i) -> i64 { unsafe { simd_reduce_or(a.as_i64x8()) 
} } @@ -33662,7 +35314,8 @@ pub fn _mm512_reduce_or_epi64(a: __m512i) -> i64 { #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_mask_reduce_or_epi64(k: __mmask8, a: __m512i) -> i64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_reduce_or_epi64(k: __mmask8, a: __m512i) -> i64 { unsafe { simd_reduce_or(simd_select_bitmask(k, a.as_i64x8(), i64x8::ZERO)) } } @@ -33676,7 +35329,8 @@ pub fn _mm512_mask_reduce_or_epi64(k: __mmask8, a: __m512i) -> i64 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] // This intrinsic has no corresponding instruction. -pub fn _mm512_undefined_pd() -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_undefined_pd() -> __m512d { unsafe { const { mem::zeroed() } } } @@ -33690,7 +35344,8 @@ pub fn _mm512_undefined_pd() -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] // This intrinsic has no corresponding instruction. -pub fn _mm512_undefined_ps() -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_undefined_ps() -> __m512 { unsafe { const { mem::zeroed() } } } @@ -33704,7 +35359,8 @@ pub fn _mm512_undefined_ps() -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] // This intrinsic has no corresponding instruction. -pub fn _mm512_undefined_epi32() -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_undefined_epi32() -> __m512i { unsafe { const { mem::zeroed() } } } @@ -33718,7 +35374,8 @@ pub fn _mm512_undefined_epi32() -> __m512i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] // This intrinsic has no corresponding instruction. 
-pub fn _mm512_undefined() -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_undefined() -> __m512 { unsafe { const { mem::zeroed() } } } @@ -33729,7 +35386,8 @@ pub fn _mm512_undefined() -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32 -pub unsafe fn _mm512_loadu_epi32(mem_addr: *const i32) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_loadu_epi32(mem_addr: *const i32) -> __m512i { ptr::read_unaligned(mem_addr as *const __m512i) } @@ -33740,7 +35398,8 @@ pub unsafe fn _mm512_loadu_epi32(mem_addr: *const i32) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32 -pub unsafe fn _mm256_loadu_epi32(mem_addr: *const i32) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_loadu_epi32(mem_addr: *const i32) -> __m256i { ptr::read_unaligned(mem_addr as *const __m256i) } @@ -33751,7 +35410,8 @@ pub unsafe fn _mm256_loadu_epi32(mem_addr: *const i32) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32 -pub unsafe fn _mm_loadu_epi32(mem_addr: *const i32) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_loadu_epi32(mem_addr: *const i32) -> __m128i { ptr::read_unaligned(mem_addr as *const __m128i) } @@ -34257,7 +35917,8 @@ pub unsafe fn _mm_mask_cvtusepi64_storeu_epi32(mem_addr: *mut i32, k: __mmask8, #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32 -pub unsafe fn _mm512_storeu_epi32(mem_addr: *mut i32, a: __m512i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_storeu_epi32(mem_addr: *mut i32, a: __m512i) { ptr::write_unaligned(mem_addr as *mut __m512i, a); } @@ -34268,7 +35929,8 @@ pub unsafe fn _mm512_storeu_epi32(mem_addr: *mut i32, a: __m512i) { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32 -pub unsafe fn _mm256_storeu_epi32(mem_addr: *mut i32, a: __m256i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_storeu_epi32(mem_addr: *mut i32, a: __m256i) { ptr::write_unaligned(mem_addr as *mut __m256i, a); } @@ -34279,7 +35941,8 @@ pub unsafe fn _mm256_storeu_epi32(mem_addr: *mut i32, a: __m256i) { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32 -pub unsafe fn _mm_storeu_epi32(mem_addr: *mut i32, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_storeu_epi32(mem_addr: *mut i32, a: __m128i) { ptr::write_unaligned(mem_addr as *mut __m128i, a); } @@ -34290,7 +35953,8 @@ pub unsafe fn _mm_storeu_epi32(mem_addr: *mut i32, a: __m128i) { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] 
//should be vmovdqu64 -pub unsafe fn _mm512_loadu_epi64(mem_addr: *const i64) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_loadu_epi64(mem_addr: *const i64) -> __m512i { ptr::read_unaligned(mem_addr as *const __m512i) } @@ -34301,7 +35965,8 @@ pub unsafe fn _mm512_loadu_epi64(mem_addr: *const i64) -> __m512i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu64 -pub unsafe fn _mm256_loadu_epi64(mem_addr: *const i64) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_loadu_epi64(mem_addr: *const i64) -> __m256i { ptr::read_unaligned(mem_addr as *const __m256i) } @@ -34312,7 +35977,8 @@ pub unsafe fn _mm256_loadu_epi64(mem_addr: *const i64) -> __m256i { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu64 -pub unsafe fn _mm_loadu_epi64(mem_addr: *const i64) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_loadu_epi64(mem_addr: *const i64) -> __m128i { ptr::read_unaligned(mem_addr as *const __m128i) } @@ -34323,7 +35989,8 @@ pub unsafe fn _mm_loadu_epi64(mem_addr: *const i64) -> __m128i { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu64 -pub unsafe fn _mm512_storeu_epi64(mem_addr: *mut i64, a: __m512i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_storeu_epi64(mem_addr: *mut i64, a: __m512i) { ptr::write_unaligned(mem_addr as *mut __m512i, a); } @@ -34334,7 +36001,8 @@ pub unsafe fn _mm512_storeu_epi64(mem_addr: *mut i64, a: __m512i) { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu64 -pub unsafe fn _mm256_storeu_epi64(mem_addr: *mut i64, a: __m256i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_storeu_epi64(mem_addr: *mut i64, a: __m256i) { ptr::write_unaligned(mem_addr as *mut __m256i, a); } @@ -34345,7 +36013,8 @@ pub unsafe fn _mm256_storeu_epi64(mem_addr: *mut i64, a: __m256i) { #[target_feature(enable = "avx512f,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu64 -pub unsafe fn _mm_storeu_epi64(mem_addr: *mut i64, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_storeu_epi64(mem_addr: *mut i64, a: __m128i) { ptr::write_unaligned(mem_addr as *mut __m128i, a); } @@ -34356,7 +36025,8 @@ pub unsafe fn _mm_storeu_epi64(mem_addr: *mut i64, a: __m128i) { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32 -pub unsafe fn _mm512_loadu_si512(mem_addr: *const __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_loadu_si512(mem_addr: *const __m512i) -> __m512i { ptr::read_unaligned(mem_addr) } @@ -34367,7 +36037,8 @@ pub unsafe fn _mm512_loadu_si512(mem_addr: *const __m512i) -> __m512i { #[target_feature(enable = "avx512f")] 
#[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32 -pub unsafe fn _mm512_storeu_si512(mem_addr: *mut __m512i, a: __m512i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_storeu_si512(mem_addr: *mut __m512i, a: __m512i) { ptr::write_unaligned(mem_addr, a); } @@ -34380,7 +36051,8 @@ pub unsafe fn _mm512_storeu_si512(mem_addr: *mut __m512i, a: __m512i) { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] -pub unsafe fn _mm512_loadu_pd(mem_addr: *const f64) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_loadu_pd(mem_addr: *const f64) -> __m512d { ptr::read_unaligned(mem_addr as *const __m512d) } @@ -34393,7 +36065,8 @@ pub unsafe fn _mm512_loadu_pd(mem_addr: *const f64) -> __m512d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] -pub unsafe fn _mm512_storeu_pd(mem_addr: *mut f64, a: __m512d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_storeu_pd(mem_addr: *mut f64, a: __m512d) { ptr::write_unaligned(mem_addr as *mut __m512d, a); } @@ -34406,7 +36079,8 @@ pub unsafe fn _mm512_storeu_pd(mem_addr: *mut f64, a: __m512d) { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] -pub unsafe fn _mm512_loadu_ps(mem_addr: *const f32) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_loadu_ps(mem_addr: *const f32) -> __m512 { ptr::read_unaligned(mem_addr as *const __m512) } @@ -34419,7 +36093,8 @@ pub unsafe fn _mm512_loadu_ps(mem_addr: *const f32) -> __m512 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovups))] -pub unsafe fn _mm512_storeu_ps(mem_addr: *mut f32, a: __m512) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_storeu_ps(mem_addr: *mut f32, a: __m512) { ptr::write_unaligned(mem_addr as *mut __m512, a); } @@ -34433,7 +36108,8 @@ pub unsafe fn _mm512_storeu_ps(mem_addr: *mut f32, a: __m512) { all(test, not(all(target_arch = "x86", target_env = "msvc"))), assert_instr(vmovaps) )] //should be vmovdqa32 -pub unsafe fn _mm512_load_si512(mem_addr: *const __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_load_si512(mem_addr: *const __m512i) -> __m512i { ptr::read(mem_addr) } @@ -34447,7 +36123,8 @@ pub unsafe fn _mm512_load_si512(mem_addr: *const __m512i) -> __m512i { all(test, not(all(target_arch = "x86", target_env = "msvc"))), assert_instr(vmovaps) )] //should be vmovdqa32 -pub unsafe fn _mm512_store_si512(mem_addr: *mut __m512i, a: __m512i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_store_si512(mem_addr: *mut __m512i, a: __m512i) { ptr::write(mem_addr, a); } @@ -34461,7 +36138,8 @@ pub unsafe fn _mm512_store_si512(mem_addr: *mut __m512i, a: __m512i) { all(test, not(all(target_arch = "x86", target_env = "msvc"))), assert_instr(vmovaps) )] //should be vmovdqa32 -pub unsafe fn _mm512_load_epi32(mem_addr: *const i32) -> __m512i { +#[rustc_const_unstable(feature = 
"stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_load_epi32(mem_addr: *const i32) -> __m512i { ptr::read(mem_addr as *const __m512i) } @@ -34475,7 +36153,8 @@ pub unsafe fn _mm512_load_epi32(mem_addr: *const i32) -> __m512i { all(test, not(all(target_arch = "x86", target_env = "msvc"))), assert_instr(vmovaps) )] //should be vmovdqa32 -pub unsafe fn _mm256_load_epi32(mem_addr: *const i32) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_load_epi32(mem_addr: *const i32) -> __m256i { ptr::read(mem_addr as *const __m256i) } @@ -34489,7 +36168,8 @@ pub unsafe fn _mm256_load_epi32(mem_addr: *const i32) -> __m256i { all(test, not(all(target_arch = "x86", target_env = "msvc"))), assert_instr(vmovaps) )] //should be vmovdqa32 -pub unsafe fn _mm_load_epi32(mem_addr: *const i32) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_load_epi32(mem_addr: *const i32) -> __m128i { ptr::read(mem_addr as *const __m128i) } @@ -34503,7 +36183,8 @@ pub unsafe fn _mm_load_epi32(mem_addr: *const i32) -> __m128i { all(test, not(all(target_arch = "x86", target_env = "msvc"))), assert_instr(vmovaps) )] //should be vmovdqa32 -pub unsafe fn _mm512_store_epi32(mem_addr: *mut i32, a: __m512i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_store_epi32(mem_addr: *mut i32, a: __m512i) { ptr::write(mem_addr as *mut __m512i, a); } @@ -34517,7 +36198,8 @@ pub unsafe fn _mm512_store_epi32(mem_addr: *mut i32, a: __m512i) { all(test, not(all(target_arch = "x86", target_env = "msvc"))), assert_instr(vmovaps) )] //should be vmovdqa32 -pub unsafe fn _mm256_store_epi32(mem_addr: *mut i32, a: __m256i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_store_epi32(mem_addr: *mut i32, a: __m256i) { ptr::write(mem_addr as *mut __m256i, a); } @@ -34531,7 +36213,8 @@ pub unsafe fn _mm256_store_epi32(mem_addr: *mut i32, a: __m256i) { all(test, not(all(target_arch = "x86", target_env = "msvc"))), assert_instr(vmovaps) )] //should be vmovdqa32 -pub unsafe fn _mm_store_epi32(mem_addr: *mut i32, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_store_epi32(mem_addr: *mut i32, a: __m128i) { ptr::write(mem_addr as *mut __m128i, a); } @@ -34545,7 +36228,8 @@ pub unsafe fn _mm_store_epi32(mem_addr: *mut i32, a: __m128i) { all(test, not(all(target_arch = "x86", target_env = "msvc"))), assert_instr(vmovaps) )] //should be vmovdqa64 -pub unsafe fn _mm512_load_epi64(mem_addr: *const i64) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_load_epi64(mem_addr: *const i64) -> __m512i { ptr::read(mem_addr as *const __m512i) } @@ -34559,7 +36243,8 @@ pub unsafe fn _mm512_load_epi64(mem_addr: *const i64) -> __m512i { all(test, not(all(target_arch = "x86", target_env = "msvc"))), assert_instr(vmovaps) )] //should be vmovdqa64 -pub unsafe fn _mm256_load_epi64(mem_addr: *const i64) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_load_epi64(mem_addr: *const i64) -> __m256i { ptr::read(mem_addr as *const __m256i) } @@ -34573,7 +36258,8 @@ pub unsafe fn _mm256_load_epi64(mem_addr: *const i64) -> __m256i { all(test, not(all(target_arch = "x86", target_env = "msvc"))), assert_instr(vmovaps) )] //should be vmovdqa64 -pub 
unsafe fn _mm_load_epi64(mem_addr: *const i64) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_load_epi64(mem_addr: *const i64) -> __m128i { ptr::read(mem_addr as *const __m128i) } @@ -34587,7 +36273,8 @@ pub unsafe fn _mm_load_epi64(mem_addr: *const i64) -> __m128i { all(test, not(all(target_arch = "x86", target_env = "msvc"))), assert_instr(vmovaps) )] //should be vmovdqa64 -pub unsafe fn _mm512_store_epi64(mem_addr: *mut i64, a: __m512i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_store_epi64(mem_addr: *mut i64, a: __m512i) { ptr::write(mem_addr as *mut __m512i, a); } @@ -34601,7 +36288,8 @@ pub unsafe fn _mm512_store_epi64(mem_addr: *mut i64, a: __m512i) { all(test, not(all(target_arch = "x86", target_env = "msvc"))), assert_instr(vmovaps) )] //should be vmovdqa64 -pub unsafe fn _mm256_store_epi64(mem_addr: *mut i64, a: __m256i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_store_epi64(mem_addr: *mut i64, a: __m256i) { ptr::write(mem_addr as *mut __m256i, a); } @@ -34615,7 +36303,8 @@ pub unsafe fn _mm256_store_epi64(mem_addr: *mut i64, a: __m256i) { all(test, not(all(target_arch = "x86", target_env = "msvc"))), assert_instr(vmovaps) )] //should be vmovdqa64 -pub unsafe fn _mm_store_epi64(mem_addr: *mut i64, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_store_epi64(mem_addr: *mut i64, a: __m128i) { ptr::write(mem_addr as *mut __m128i, a); } @@ -34629,7 +36318,8 @@ pub unsafe fn _mm_store_epi64(mem_addr: *mut i64, a: __m128i) { all(test, not(all(target_arch = "x86", target_env = "msvc"))), assert_instr(vmovaps) )] -pub unsafe fn _mm512_load_ps(mem_addr: *const f32) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_load_ps(mem_addr: *const f32) -> __m512 { ptr::read(mem_addr as *const __m512) } @@ -34643,7 +36333,8 @@ pub unsafe fn _mm512_load_ps(mem_addr: *const f32) -> __m512 { all(test, not(all(target_arch = "x86", target_env = "msvc"))), assert_instr(vmovaps) )] -pub unsafe fn _mm512_store_ps(mem_addr: *mut f32, a: __m512) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_store_ps(mem_addr: *mut f32, a: __m512) { ptr::write(mem_addr as *mut __m512, a); } @@ -34657,7 +36348,8 @@ pub unsafe fn _mm512_store_ps(mem_addr: *mut f32, a: __m512) { all(test, not(all(target_arch = "x86", target_env = "msvc"))), assert_instr(vmovaps) )] //should be vmovapd -pub unsafe fn _mm512_load_pd(mem_addr: *const f64) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_load_pd(mem_addr: *const f64) -> __m512d { ptr::read(mem_addr as *const __m512d) } @@ -34671,7 +36363,8 @@ pub unsafe fn _mm512_load_pd(mem_addr: *const f64) -> __m512d { all(test, not(all(target_arch = "x86", target_env = "msvc"))), assert_instr(vmovaps) )] //should be vmovapd -pub unsafe fn _mm512_store_pd(mem_addr: *mut f64, a: __m512d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_store_pd(mem_addr: *mut f64, a: __m512d) { ptr::write(mem_addr as *mut __m512d, a); } @@ -34684,7 +36377,12 @@ pub unsafe fn _mm512_store_pd(mem_addr: *mut f64, a: __m512d) { #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovdqu32))] #[stable(feature = 
"stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_loadu_epi32(src: __m512i, k: __mmask16, mem_addr: *const i32) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_loadu_epi32( + src: __m512i, + k: __mmask16, + mem_addr: *const i32, +) -> __m512i { let mask = simd_select_bitmask(k, i32x16::splat(!0), i32x16::ZERO); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, src.as_i32x16()).as_m512i() } @@ -34698,7 +36396,8 @@ pub unsafe fn _mm512_mask_loadu_epi32(src: __m512i, k: __mmask16, mem_addr: *con #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovdqu32))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_maskz_loadu_epi32(k: __mmask16, mem_addr: *const i32) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_maskz_loadu_epi32(k: __mmask16, mem_addr: *const i32) -> __m512i { _mm512_mask_loadu_epi32(_mm512_setzero_si512(), k, mem_addr) } @@ -34711,7 +36410,12 @@ pub unsafe fn _mm512_maskz_loadu_epi32(k: __mmask16, mem_addr: *const i32) -> __ #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovdqu64))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_loadu_epi64(src: __m512i, k: __mmask8, mem_addr: *const i64) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_loadu_epi64( + src: __m512i, + k: __mmask8, + mem_addr: *const i64, +) -> __m512i { let mask = simd_select_bitmask(k, i64x8::splat(!0), i64x8::ZERO); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, src.as_i64x8()).as_m512i() } @@ -34725,7 +36429,8 @@ pub unsafe fn _mm512_mask_loadu_epi64(src: __m512i, k: __mmask8, mem_addr: *cons #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovdqu64))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m512i { _mm512_mask_loadu_epi64(_mm512_setzero_si512(), k, mem_addr) } @@ -34738,7 +36443,12 @@ pub unsafe fn _mm512_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovups))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_loadu_ps(src: __m512, k: __mmask16, mem_addr: *const f32) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_loadu_ps( + src: __m512, + k: __mmask16, + mem_addr: *const f32, +) -> __m512 { let mask = simd_select_bitmask(k, i32x16::splat(!0), i32x16::ZERO); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, src.as_f32x16()).as_m512() } @@ -34752,7 +36462,8 @@ pub unsafe fn _mm512_mask_loadu_ps(src: __m512, k: __mmask16, mem_addr: *const f #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovups))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_maskz_loadu_ps(k: __mmask16, mem_addr: *const f32) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_maskz_loadu_ps(k: __mmask16, mem_addr: *const f32) -> __m512 { _mm512_mask_loadu_ps(_mm512_setzero_ps(), k, mem_addr) } @@ -34765,7 +36476,12 
@@ pub unsafe fn _mm512_maskz_loadu_ps(k: __mmask16, mem_addr: *const f32) -> __m51 #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovupd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_loadu_pd(src: __m512d, k: __mmask8, mem_addr: *const f64) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_loadu_pd( + src: __m512d, + k: __mmask8, + mem_addr: *const f64, +) -> __m512d { let mask = simd_select_bitmask(k, i64x8::splat(!0), i64x8::ZERO); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, src.as_f64x8()).as_m512d() } @@ -34779,7 +36495,8 @@ pub unsafe fn _mm512_mask_loadu_pd(src: __m512d, k: __mmask8, mem_addr: *const f #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovupd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m512d { _mm512_mask_loadu_pd(_mm512_setzero_pd(), k, mem_addr) } @@ -34792,7 +36509,12 @@ pub unsafe fn _mm512_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m512 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu32))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_loadu_epi32(src: __m256i, k: __mmask8, mem_addr: *const i32) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_loadu_epi32( + src: __m256i, + k: __mmask8, + mem_addr: *const i32, +) -> __m256i { let mask = simd_select_bitmask(k, i32x8::splat(!0), i32x8::ZERO); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, src.as_i32x8()).as_m256i() } @@ -34806,7 +36528,8 @@ pub unsafe fn _mm256_mask_loadu_epi32(src: __m256i, k: __mmask8, mem_addr: *cons #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu32))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_maskz_loadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_maskz_loadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m256i { _mm256_mask_loadu_epi32(_mm256_setzero_si256(), k, mem_addr) } @@ -34819,7 +36542,12 @@ pub unsafe fn _mm256_maskz_loadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu64))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_loadu_epi64(src: __m256i, k: __mmask8, mem_addr: *const i64) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_loadu_epi64( + src: __m256i, + k: __mmask8, + mem_addr: *const i64, +) -> __m256i { let mask = simd_select_bitmask(k, i64x4::splat(!0), i64x4::ZERO); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, src.as_i64x4()).as_m256i() } @@ -34833,7 +36561,8 @@ pub unsafe fn _mm256_mask_loadu_epi64(src: __m256i, k: __mmask8, mem_addr: *cons #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu64))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m256i { +#[rustc_const_unstable(feature = 
"stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m256i { _mm256_mask_loadu_epi64(_mm256_setzero_si256(), k, mem_addr) } @@ -34846,7 +36575,8 @@ pub unsafe fn _mm256_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovups))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_loadu_ps(src: __m256, k: __mmask8, mem_addr: *const f32) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_loadu_ps(src: __m256, k: __mmask8, mem_addr: *const f32) -> __m256 { let mask = simd_select_bitmask(k, i32x8::splat(!0), i32x8::ZERO); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, src.as_f32x8()).as_m256() } @@ -34860,7 +36590,8 @@ pub unsafe fn _mm256_mask_loadu_ps(src: __m256, k: __mmask8, mem_addr: *const f3 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovups))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_maskz_loadu_ps(k: __mmask8, mem_addr: *const f32) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_maskz_loadu_ps(k: __mmask8, mem_addr: *const f32) -> __m256 { _mm256_mask_loadu_ps(_mm256_setzero_ps(), k, mem_addr) } @@ -34873,7 +36604,12 @@ pub unsafe fn _mm256_maskz_loadu_ps(k: __mmask8, mem_addr: *const f32) -> __m256 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovupd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_loadu_pd(src: __m256d, k: __mmask8, mem_addr: *const f64) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_loadu_pd( + src: __m256d, + k: __mmask8, + mem_addr: *const f64, +) -> __m256d { let mask = simd_select_bitmask(k, i64x4::splat(!0), i64x4::ZERO); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, src.as_f64x4()).as_m256d() } @@ -34887,7 +36623,8 @@ pub unsafe fn _mm256_mask_loadu_pd(src: __m256d, k: __mmask8, mem_addr: *const f #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovupd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m256d { _mm256_mask_loadu_pd(_mm256_setzero_pd(), k, mem_addr) } @@ -34900,7 +36637,12 @@ pub unsafe fn _mm256_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m256 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu32))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_loadu_epi32(src: __m128i, k: __mmask8, mem_addr: *const i32) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_loadu_epi32( + src: __m128i, + k: __mmask8, + mem_addr: *const i32, +) -> __m128i { let mask = simd_select_bitmask(k, i32x4::splat(!0), i32x4::ZERO); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, src.as_i32x4()).as_m128i() } @@ -34914,7 +36656,8 @@ pub unsafe fn _mm_mask_loadu_epi32(src: __m128i, k: __mmask8, mem_addr: *const i #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, 
assert_instr(vmovdqu32))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_maskz_loadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_maskz_loadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m128i { _mm_mask_loadu_epi32(_mm_setzero_si128(), k, mem_addr) } @@ -34927,7 +36670,12 @@ pub unsafe fn _mm_maskz_loadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m128 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu64))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_loadu_epi64(src: __m128i, k: __mmask8, mem_addr: *const i64) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_loadu_epi64( + src: __m128i, + k: __mmask8, + mem_addr: *const i64, +) -> __m128i { let mask = simd_select_bitmask(k, i64x2::splat(!0), i64x2::ZERO); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, src.as_i64x2()).as_m128i() } @@ -34941,7 +36689,8 @@ pub unsafe fn _mm_mask_loadu_epi64(src: __m128i, k: __mmask8, mem_addr: *const i #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu64))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m128i { _mm_mask_loadu_epi64(_mm_setzero_si128(), k, mem_addr) } @@ -34954,7 +36703,8 @@ pub unsafe fn _mm_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m128 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovups))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_loadu_ps(src: __m128, k: __mmask8, mem_addr: *const f32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_loadu_ps(src: __m128, k: __mmask8, mem_addr: *const f32) -> __m128 { let mask = simd_select_bitmask(k, i32x4::splat(!0), i32x4::ZERO); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, src.as_f32x4()).as_m128() } @@ -34968,7 +36718,8 @@ pub unsafe fn _mm_mask_loadu_ps(src: __m128, k: __mmask8, mem_addr: *const f32) #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovups))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_maskz_loadu_ps(k: __mmask8, mem_addr: *const f32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_maskz_loadu_ps(k: __mmask8, mem_addr: *const f32) -> __m128 { _mm_mask_loadu_ps(_mm_setzero_ps(), k, mem_addr) } @@ -34981,7 +36732,8 @@ pub unsafe fn _mm_maskz_loadu_ps(k: __mmask8, mem_addr: *const f32) -> __m128 { #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovupd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_loadu_pd(src: __m128d, k: __mmask8, mem_addr: *const f64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_loadu_pd(src: __m128d, k: __mmask8, mem_addr: *const f64) -> __m128d { let mask = simd_select_bitmask(k, i64x2::splat(!0), i64x2::ZERO); simd_masked_load!(SimdAlign::Unaligned, mask, mem_addr, src.as_f64x2()).as_m128d() } @@ -34995,7 +36747,8 @@ pub 
unsafe fn _mm_mask_loadu_pd(src: __m128d, k: __mmask8, mem_addr: *const f64) #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovupd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m128d { _mm_mask_loadu_pd(_mm_setzero_pd(), k, mem_addr) } @@ -35008,7 +36761,12 @@ pub unsafe fn _mm_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m128d { #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovdqa32))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_load_epi32(src: __m512i, k: __mmask16, mem_addr: *const i32) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_load_epi32( + src: __m512i, + k: __mmask16, + mem_addr: *const i32, +) -> __m512i { let mask = simd_select_bitmask(k, i32x16::splat(!0), i32x16::ZERO); simd_masked_load!(SimdAlign::Vector, mask, mem_addr, src.as_i32x16()).as_m512i() } @@ -35022,7 +36780,8 @@ pub unsafe fn _mm512_mask_load_epi32(src: __m512i, k: __mmask16, mem_addr: *cons #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovdqa32))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_maskz_load_epi32(k: __mmask16, mem_addr: *const i32) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_maskz_load_epi32(k: __mmask16, mem_addr: *const i32) -> __m512i { _mm512_mask_load_epi32(_mm512_setzero_si512(), k, mem_addr) } @@ -35035,7 +36794,12 @@ pub unsafe fn _mm512_maskz_load_epi32(k: __mmask16, mem_addr: *const i32) -> __m #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovdqa64))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_load_epi64(src: __m512i, k: __mmask8, mem_addr: *const i64) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_load_epi64( + src: __m512i, + k: __mmask8, + mem_addr: *const i64, +) -> __m512i { let mask = simd_select_bitmask(k, i64x8::splat(!0), i64x8::ZERO); simd_masked_load!(SimdAlign::Vector, mask, mem_addr, src.as_i64x8()).as_m512i() } @@ -35049,7 +36813,8 @@ pub unsafe fn _mm512_mask_load_epi64(src: __m512i, k: __mmask8, mem_addr: *const #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovdqa64))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m512i { _mm512_mask_load_epi64(_mm512_setzero_si512(), k, mem_addr) } @@ -35062,7 +36827,8 @@ pub unsafe fn _mm512_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m5 #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovaps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_load_ps(src: __m512, k: __mmask16, mem_addr: *const f32) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_load_ps(src: __m512, k: __mmask16, mem_addr: *const f32) -> __m512 { let mask = 
simd_select_bitmask(k, i32x16::splat(!0), i32x16::ZERO); simd_masked_load!(SimdAlign::Vector, mask, mem_addr, src.as_f32x16()).as_m512() } @@ -35076,7 +36842,8 @@ pub unsafe fn _mm512_mask_load_ps(src: __m512, k: __mmask16, mem_addr: *const f3 #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovaps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_maskz_load_ps(k: __mmask16, mem_addr: *const f32) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_maskz_load_ps(k: __mmask16, mem_addr: *const f32) -> __m512 { _mm512_mask_load_ps(_mm512_setzero_ps(), k, mem_addr) } @@ -35089,7 +36856,12 @@ pub unsafe fn _mm512_maskz_load_ps(k: __mmask16, mem_addr: *const f32) -> __m512 #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovapd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_load_pd(src: __m512d, k: __mmask8, mem_addr: *const f64) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_load_pd( + src: __m512d, + k: __mmask8, + mem_addr: *const f64, +) -> __m512d { let mask = simd_select_bitmask(k, i64x8::splat(!0), i64x8::ZERO); simd_masked_load!(SimdAlign::Vector, mask, mem_addr, src.as_f64x8()).as_m512d() } @@ -35103,7 +36875,8 @@ pub unsafe fn _mm512_mask_load_pd(src: __m512d, k: __mmask8, mem_addr: *const f6 #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovapd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m512d { _mm512_mask_load_pd(_mm512_setzero_pd(), k, mem_addr) } @@ -35116,7 +36889,12 @@ pub unsafe fn _mm512_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m512d #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqa32))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_load_epi32(src: __m256i, k: __mmask8, mem_addr: *const i32) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_load_epi32( + src: __m256i, + k: __mmask8, + mem_addr: *const i32, +) -> __m256i { let mask = simd_select_bitmask(k, i32x8::splat(!0), i32x8::ZERO); simd_masked_load!(SimdAlign::Vector, mask, mem_addr, src.as_i32x8()).as_m256i() } @@ -35130,7 +36908,8 @@ pub unsafe fn _mm256_mask_load_epi32(src: __m256i, k: __mmask8, mem_addr: *const #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqa32))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_maskz_load_epi32(k: __mmask8, mem_addr: *const i32) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_maskz_load_epi32(k: __mmask8, mem_addr: *const i32) -> __m256i { _mm256_mask_load_epi32(_mm256_setzero_si256(), k, mem_addr) } @@ -35143,7 +36922,12 @@ pub unsafe fn _mm256_maskz_load_epi32(k: __mmask8, mem_addr: *const i32) -> __m2 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqa64))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_load_epi64(src: __m256i, k: __mmask8, mem_addr: *const i64) -> __m256i { +#[rustc_const_unstable(feature = 
"stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_load_epi64( + src: __m256i, + k: __mmask8, + mem_addr: *const i64, +) -> __m256i { let mask = simd_select_bitmask(k, i64x4::splat(!0), i64x4::ZERO); simd_masked_load!(SimdAlign::Vector, mask, mem_addr, src.as_i64x4()).as_m256i() } @@ -35157,7 +36941,8 @@ pub unsafe fn _mm256_mask_load_epi64(src: __m256i, k: __mmask8, mem_addr: *const #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqa64))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m256i { _mm256_mask_load_epi64(_mm256_setzero_si256(), k, mem_addr) } @@ -35170,7 +36955,8 @@ pub unsafe fn _mm256_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m2 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovaps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_load_ps(src: __m256, k: __mmask8, mem_addr: *const f32) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_load_ps(src: __m256, k: __mmask8, mem_addr: *const f32) -> __m256 { let mask = simd_select_bitmask(k, i32x8::splat(!0), i32x8::ZERO); simd_masked_load!(SimdAlign::Vector, mask, mem_addr, src.as_f32x8()).as_m256() } @@ -35184,7 +36970,8 @@ pub unsafe fn _mm256_mask_load_ps(src: __m256, k: __mmask8, mem_addr: *const f32 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovaps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_maskz_load_ps(k: __mmask8, mem_addr: *const f32) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_maskz_load_ps(k: __mmask8, mem_addr: *const f32) -> __m256 { _mm256_mask_load_ps(_mm256_setzero_ps(), k, mem_addr) } @@ -35197,7 +36984,12 @@ pub unsafe fn _mm256_maskz_load_ps(k: __mmask8, mem_addr: *const f32) -> __m256 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovapd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_load_pd(src: __m256d, k: __mmask8, mem_addr: *const f64) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_load_pd( + src: __m256d, + k: __mmask8, + mem_addr: *const f64, +) -> __m256d { let mask = simd_select_bitmask(k, i64x4::splat(!0), i64x4::ZERO); simd_masked_load!(SimdAlign::Vector, mask, mem_addr, src.as_f64x4()).as_m256d() } @@ -35211,7 +37003,8 @@ pub unsafe fn _mm256_mask_load_pd(src: __m256d, k: __mmask8, mem_addr: *const f6 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovapd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m256d { _mm256_mask_load_pd(_mm256_setzero_pd(), k, mem_addr) } @@ -35224,7 +37017,12 @@ pub unsafe fn _mm256_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m256d #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqa32))] #[stable(feature = 
"stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_load_epi32(src: __m128i, k: __mmask8, mem_addr: *const i32) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_load_epi32( + src: __m128i, + k: __mmask8, + mem_addr: *const i32, +) -> __m128i { let mask = simd_select_bitmask(k, i32x4::splat(!0), i32x4::ZERO); simd_masked_load!(SimdAlign::Vector, mask, mem_addr, src.as_i32x4()).as_m128i() } @@ -35238,7 +37036,8 @@ pub unsafe fn _mm_mask_load_epi32(src: __m128i, k: __mmask8, mem_addr: *const i3 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqa32))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_maskz_load_epi32(k: __mmask8, mem_addr: *const i32) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_maskz_load_epi32(k: __mmask8, mem_addr: *const i32) -> __m128i { _mm_mask_load_epi32(_mm_setzero_si128(), k, mem_addr) } @@ -35251,7 +37050,12 @@ pub unsafe fn _mm_maskz_load_epi32(k: __mmask8, mem_addr: *const i32) -> __m128i #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqa64))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_load_epi64(src: __m128i, k: __mmask8, mem_addr: *const i64) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_load_epi64( + src: __m128i, + k: __mmask8, + mem_addr: *const i64, +) -> __m128i { let mask = simd_select_bitmask(k, i64x2::splat(!0), i64x2::ZERO); simd_masked_load!(SimdAlign::Vector, mask, mem_addr, src.as_i64x2()).as_m128i() } @@ -35265,7 +37069,8 @@ pub unsafe fn _mm_mask_load_epi64(src: __m128i, k: __mmask8, mem_addr: *const i6 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqa64))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m128i { _mm_mask_load_epi64(_mm_setzero_si128(), k, mem_addr) } @@ -35278,7 +37083,8 @@ pub unsafe fn _mm_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m128i #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovaps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_load_ps(src: __m128, k: __mmask8, mem_addr: *const f32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_load_ps(src: __m128, k: __mmask8, mem_addr: *const f32) -> __m128 { let mask = simd_select_bitmask(k, i32x4::splat(!0), i32x4::ZERO); simd_masked_load!(SimdAlign::Vector, mask, mem_addr, src.as_f32x4()).as_m128() } @@ -35292,7 +37098,8 @@ pub unsafe fn _mm_mask_load_ps(src: __m128, k: __mmask8, mem_addr: *const f32) - #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovaps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_maskz_load_ps(k: __mmask8, mem_addr: *const f32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_maskz_load_ps(k: __mmask8, mem_addr: *const f32) -> __m128 { _mm_mask_load_ps(_mm_setzero_ps(), k, mem_addr) } @@ -35305,7 +37112,8 @@ pub unsafe fn _mm_maskz_load_ps(k: __mmask8, mem_addr: 
*const f32) -> __m128 { #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovapd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_load_pd(src: __m128d, k: __mmask8, mem_addr: *const f64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_load_pd(src: __m128d, k: __mmask8, mem_addr: *const f64) -> __m128d { let mask = simd_select_bitmask(k, i64x2::splat(!0), i64x2::ZERO); simd_masked_load!(SimdAlign::Vector, mask, mem_addr, src.as_f64x2()).as_m128d() } @@ -35319,7 +37127,8 @@ pub unsafe fn _mm_mask_load_pd(src: __m128d, k: __mmask8, mem_addr: *const f64) #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovapd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m128d { _mm_mask_load_pd(_mm_setzero_pd(), k, mem_addr) } @@ -35419,7 +37228,8 @@ pub unsafe fn _mm_maskz_load_sd(k: __mmask8, mem_addr: *const f64) -> __m128d { #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovdqu32))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask16, a: __m512i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask16, a: __m512i) { let mask = simd_select_bitmask(mask, i32x16::splat(!0), i32x16::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_i32x16()); } @@ -35432,7 +37242,8 @@ pub unsafe fn _mm512_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask16, a: _ #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovdqu64))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m512i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m512i) { let mask = simd_select_bitmask(mask, i64x8::splat(!0), i64x8::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_i64x8()); } @@ -35445,7 +37256,8 @@ pub unsafe fn _mm512_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __ #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovups))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask16, a: __m512) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask16, a: __m512) { let mask = simd_select_bitmask(mask, i32x16::splat(!0), i32x16::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_f32x16()); } @@ -35458,7 +37270,8 @@ pub unsafe fn _mm512_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask16, a: __m5 #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovupd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m512d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: 
__m512d) { let mask = simd_select_bitmask(mask, i64x8::splat(!0), i64x8::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_f64x8()); } @@ -35471,7 +37284,8 @@ pub unsafe fn _mm512_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m51 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu32))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m256i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m256i) { let mask = simd_select_bitmask(mask, i32x8::splat(!0), i32x8::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_i32x8()); } @@ -35484,7 +37298,8 @@ pub unsafe fn _mm256_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask8, a: __ #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu64))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m256i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m256i) { let mask = simd_select_bitmask(mask, i64x4::splat(!0), i64x4::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_i64x4()); } @@ -35497,7 +37312,8 @@ pub unsafe fn _mm256_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __ #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovups))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask8, a: __m256) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask8, a: __m256) { let mask = simd_select_bitmask(mask, i32x8::splat(!0), i32x8::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_f32x8()); } @@ -35510,7 +37326,8 @@ pub unsafe fn _mm256_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask8, a: __m25 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovupd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m256d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m256d) { let mask = simd_select_bitmask(mask, i64x4::splat(!0), i64x4::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_f64x4()); } @@ -35523,7 +37340,8 @@ pub unsafe fn _mm256_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m25 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu32))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m128i) { let mask = simd_select_bitmask(mask, i32x4::splat(!0), i32x4::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_i32x4()); } @@ -35536,7 +37354,8 @@ pub unsafe fn _mm_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m12 #[target_feature(enable = 
"avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqu64))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m128i) { let mask = simd_select_bitmask(mask, i64x2::splat(!0), i64x2::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_i64x2()); } @@ -35549,7 +37368,8 @@ pub unsafe fn _mm_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m12 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovups))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask8, a: __m128) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask8, a: __m128) { let mask = simd_select_bitmask(mask, i32x4::splat(!0), i32x4::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_f32x4()); } @@ -35562,7 +37382,8 @@ pub unsafe fn _mm_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask8, a: __m128) #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovupd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m128d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m128d) { let mask = simd_select_bitmask(mask, i64x2::splat(!0), i64x2::ZERO); simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, a.as_f64x2()); } @@ -35575,7 +37396,8 @@ pub unsafe fn _mm_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m128d) #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovdqa32))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_store_epi32(mem_addr: *mut i32, mask: __mmask16, a: __m512i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_store_epi32(mem_addr: *mut i32, mask: __mmask16, a: __m512i) { let mask = simd_select_bitmask(mask, i32x16::splat(!0), i32x16::ZERO); simd_masked_store!(SimdAlign::Vector, mask, mem_addr, a.as_i32x16()); } @@ -35588,7 +37410,8 @@ pub unsafe fn _mm512_mask_store_epi32(mem_addr: *mut i32, mask: __mmask16, a: __ #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovdqa64))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m512i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m512i) { let mask = simd_select_bitmask(mask, i64x8::splat(!0), i64x8::ZERO); simd_masked_store!(SimdAlign::Vector, mask, mem_addr, a.as_i64x8()); } @@ -35601,7 +37424,8 @@ pub unsafe fn _mm512_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovaps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_store_ps(mem_addr: *mut f32, mask: __mmask16, a: __m512) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_store_ps(mem_addr: *mut f32, 
mask: __mmask16, a: __m512) { let mask = simd_select_bitmask(mask, i32x16::splat(!0), i32x16::ZERO); simd_masked_store!(SimdAlign::Vector, mask, mem_addr, a.as_f32x16()); } @@ -35614,7 +37438,8 @@ pub unsafe fn _mm512_mask_store_ps(mem_addr: *mut f32, mask: __mmask16, a: __m51 #[target_feature(enable = "avx512f")] #[cfg_attr(test, assert_instr(vmovapd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm512_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m512d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m512d) { let mask = simd_select_bitmask(mask, i64x8::splat(!0), i64x8::ZERO); simd_masked_store!(SimdAlign::Vector, mask, mem_addr, a.as_f64x8()); } @@ -35627,7 +37452,8 @@ pub unsafe fn _mm512_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m512 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqa32))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_store_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m256i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_store_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m256i) { let mask = simd_select_bitmask(mask, i32x8::splat(!0), i32x8::ZERO); simd_masked_store!(SimdAlign::Vector, mask, mem_addr, a.as_i32x8()); } @@ -35640,7 +37466,8 @@ pub unsafe fn _mm256_mask_store_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqa64))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m256i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m256i) { let mask = simd_select_bitmask(mask, i64x4::splat(!0), i64x4::ZERO); simd_masked_store!(SimdAlign::Vector, mask, mem_addr, a.as_i64x4()); } @@ -35653,7 +37480,8 @@ pub unsafe fn _mm256_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovaps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_store_ps(mem_addr: *mut f32, mask: __mmask8, a: __m256) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_store_ps(mem_addr: *mut f32, mask: __mmask8, a: __m256) { let mask = simd_select_bitmask(mask, i32x8::splat(!0), i32x8::ZERO); simd_masked_store!(SimdAlign::Vector, mask, mem_addr, a.as_f32x8()); } @@ -35666,7 +37494,8 @@ pub unsafe fn _mm256_mask_store_ps(mem_addr: *mut f32, mask: __mmask8, a: __m256 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovapd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm256_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m256d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m256d) { let mask = simd_select_bitmask(mask, i64x4::splat(!0), i64x4::ZERO); simd_masked_store!(SimdAlign::Vector, mask, mem_addr, a.as_f64x4()); } @@ -35679,7 +37508,8 @@ pub unsafe fn _mm256_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m256 #[target_feature(enable = "avx512f,avx512vl")] 
#[cfg_attr(test, assert_instr(vmovdqa32))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_store_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_store_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m128i) { let mask = simd_select_bitmask(mask, i32x4::splat(!0), i32x4::ZERO); simd_masked_store!(SimdAlign::Vector, mask, mem_addr, a.as_i32x4()); } @@ -35692,7 +37522,8 @@ pub unsafe fn _mm_mask_store_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m128 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovdqa64))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m128i) { let mask = simd_select_bitmask(mask, i64x2::splat(!0), i64x2::ZERO); simd_masked_store!(SimdAlign::Vector, mask, mem_addr, a.as_i64x2()); } @@ -35705,7 +37536,8 @@ pub unsafe fn _mm_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m128 #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovaps))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_store_ps(mem_addr: *mut f32, mask: __mmask8, a: __m128) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_store_ps(mem_addr: *mut f32, mask: __mmask8, a: __m128) { let mask = simd_select_bitmask(mask, i32x4::splat(!0), i32x4::ZERO); simd_masked_store!(SimdAlign::Vector, mask, mem_addr, a.as_f32x4()); } @@ -35718,7 +37550,8 @@ pub unsafe fn _mm_mask_store_ps(mem_addr: *mut f32, mask: __mmask8, a: __m128) { #[target_feature(enable = "avx512f,avx512vl")] #[cfg_attr(test, assert_instr(vmovapd))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub unsafe fn _mm_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m128d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m128d) { let mask = simd_select_bitmask(mask, i64x2::splat(!0), i64x2::ZERO); simd_masked_store!(SimdAlign::Vector, mask, mem_addr, a.as_f64x2()); } @@ -36065,7 +37898,8 @@ pub unsafe fn _mm_maskz_expandloadu_pd(k: __mmask8, mem_addr: *const f64) -> __m #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_setr_pd( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_setr_pd( e0: f64, e1: f64, e2: f64, @@ -36087,7 +37921,8 @@ pub fn _mm512_setr_pd( #[inline] #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _mm512_set_pd( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_set_pd( e0: f64, e1: f64, e2: f64, @@ -36107,7 +37942,8 @@ pub fn _mm512_set_pd( #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovss))] -pub fn _mm_mask_move_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_move_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let extractsrc: f32 = 
simd_extract!(src, 0); let mut mov: f32 = extractsrc; @@ -36125,7 +37961,8 @@ pub fn _mm_mask_move_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m12 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovss))] -pub fn _mm_maskz_move_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_move_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let mut mov: f32 = 0.; if (k & 0b00000001) != 0 { @@ -36142,7 +37979,8 @@ pub fn _mm_maskz_move_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovsd))] -pub fn _mm_mask_move_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_move_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let extractsrc: f64 = simd_extract!(src, 0); let mut mov: f64 = extractsrc; @@ -36160,7 +37998,8 @@ pub fn _mm_mask_move_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmovsd))] -pub fn _mm_maskz_move_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_move_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let mut mov: f64 = 0.; if (k & 0b00000001) != 0 { @@ -36177,7 +38016,8 @@ pub fn _mm_maskz_move_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddss))] -pub fn _mm_mask_add_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_add_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let extractsrc: f32 = simd_extract!(src, 0); let mut add: f32 = extractsrc; @@ -36197,7 +38037,8 @@ pub fn _mm_mask_add_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddss))] -pub fn _mm_maskz_add_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_add_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let mut add: f32 = 0.; if (k & 0b00000001) != 0 { @@ -36216,7 +38057,8 @@ pub fn _mm_maskz_add_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vaddsd))] -pub fn _mm_mask_add_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_add_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let extractsrc: f64 = simd_extract!(src, 0); let mut add: f64 = extractsrc; @@ -36236,7 +38078,8 @@ pub fn _mm_mask_add_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] 
#[cfg_attr(test, assert_instr(vaddsd))] -pub fn _mm_maskz_add_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_add_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let mut add: f64 = 0.; if (k & 0b00000001) != 0 { @@ -36255,7 +38098,8 @@ pub fn _mm_maskz_add_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubss))] -pub fn _mm_mask_sub_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_sub_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let extractsrc: f32 = simd_extract!(src, 0); let mut add: f32 = extractsrc; @@ -36275,7 +38119,8 @@ pub fn _mm_mask_sub_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubss))] -pub fn _mm_maskz_sub_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_sub_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let mut add: f32 = 0.; if (k & 0b00000001) != 0 { @@ -36294,7 +38139,8 @@ pub fn _mm_maskz_sub_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubsd))] -pub fn _mm_mask_sub_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_sub_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let extractsrc: f64 = simd_extract!(src, 0); let mut add: f64 = extractsrc; @@ -36314,7 +38160,8 @@ pub fn _mm_mask_sub_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vsubsd))] -pub fn _mm_maskz_sub_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_sub_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let mut add: f64 = 0.; if (k & 0b00000001) != 0 { @@ -36333,7 +38180,8 @@ pub fn _mm_maskz_sub_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulss))] -pub fn _mm_mask_mul_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mul_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let extractsrc: f32 = simd_extract!(src, 0); let mut add: f32 = extractsrc; @@ -36353,7 +38201,8 @@ pub fn _mm_mask_mul_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulss))] -pub fn _mm_maskz_mul_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_mul_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { 
unsafe { let mut add: f32 = 0.; if (k & 0b00000001) != 0 { @@ -36372,7 +38221,8 @@ pub fn _mm_maskz_mul_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulsd))] -pub fn _mm_mask_mul_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mul_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let extractsrc: f64 = simd_extract!(src, 0); let mut add: f64 = extractsrc; @@ -36392,7 +38242,8 @@ pub fn _mm_mask_mul_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vmulsd))] -pub fn _mm_maskz_mul_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_mul_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let mut add: f64 = 0.; if (k & 0b00000001) != 0 { @@ -36411,7 +38262,8 @@ pub fn _mm_maskz_mul_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivss))] -pub fn _mm_mask_div_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_div_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let extractsrc: f32 = simd_extract!(src, 0); let mut add: f32 = extractsrc; @@ -36431,7 +38283,8 @@ pub fn _mm_mask_div_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivss))] -pub fn _mm_maskz_div_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_div_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { unsafe { let mut add: f32 = 0.; if (k & 0b00000001) != 0 { @@ -36450,7 +38303,8 @@ pub fn _mm_maskz_div_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivsd))] -pub fn _mm_mask_div_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_div_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let extractsrc: f64 = simd_extract!(src, 0); let mut add: f64 = extractsrc; @@ -36470,7 +38324,8 @@ pub fn _mm_mask_div_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vdivsd))] -pub fn _mm_maskz_div_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_div_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { unsafe { let mut add: f64 = 0.; if (k & 0b00000001) != 0 { @@ -37454,7 +39309,8 @@ pub fn _mm_maskz_scalef_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] 
#[cfg_attr(test, assert_instr(vfmadd))] -pub fn _mm_mask_fmadd_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fmadd_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { unsafe { let mut fmadd: f32 = simd_extract!(a, 0); if (k & 0b00000001) != 0 { @@ -37473,7 +39329,8 @@ pub fn _mm_mask_fmadd_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] -pub fn _mm_maskz_fmadd_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fmadd_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { let mut fmadd: f32 = 0.; if (k & 0b00000001) != 0 { @@ -37493,7 +39350,8 @@ pub fn _mm_maskz_fmadd_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m12 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] -pub fn _mm_mask3_fmadd_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fmadd_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { unsafe { let mut fmadd: f32 = simd_extract!(c, 0); if (k & 0b00000001) != 0 { @@ -37512,7 +39370,8 @@ pub fn _mm_mask3_fmadd_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m12 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] -pub fn _mm_mask_fmadd_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fmadd_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { unsafe { let mut fmadd: f64 = simd_extract!(a, 0); if (k & 0b00000001) != 0 { @@ -37531,7 +39390,8 @@ pub fn _mm_mask_fmadd_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] -pub fn _mm_maskz_fmadd_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fmadd_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { let mut fmadd: f64 = 0.; if (k & 0b00000001) != 0 { @@ -37551,7 +39411,8 @@ pub fn _mm_maskz_fmadd_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmadd))] -pub fn _mm_mask3_fmadd_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fmadd_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { unsafe { let mut fmadd: f64 = simd_extract!(c, 0); if (k & 0b00000001) != 0 { @@ -37570,7 +39431,8 @@ pub fn _mm_mask3_fmadd_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] -pub fn _mm_mask_fmsub_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fmsub_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { unsafe { let mut fmsub: f32 = simd_extract!(a, 0); if (k & 0b00000001) != 0 { @@ -37590,7 +39452,8 @@ pub fn _mm_mask_fmsub_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] -pub fn _mm_maskz_fmsub_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fmsub_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { let mut fmsub: f32 = 0.; if (k & 0b00000001) != 0 { @@ -37611,7 +39474,8 @@ pub fn _mm_maskz_fmsub_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m12 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] -pub fn _mm_mask3_fmsub_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fmsub_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { unsafe { let mut fmsub: f32 = simd_extract!(c, 0); if (k & 0b00000001) != 0 { @@ -37631,7 +39495,8 @@ pub fn _mm_mask3_fmsub_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m12 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] -pub fn _mm_mask_fmsub_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fmsub_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { unsafe { let mut fmsub: f64 = simd_extract!(a, 0); if (k & 0b00000001) != 0 { @@ -37651,7 +39516,8 @@ pub fn _mm_mask_fmsub_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] -pub fn _mm_maskz_fmsub_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fmsub_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { let mut fmsub: f64 = 0.; if (k & 0b00000001) != 0 { @@ -37672,7 +39538,8 @@ pub fn _mm_maskz_fmsub_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfmsub))] -pub fn _mm_mask3_fmsub_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fmsub_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { unsafe { let mut fmsub: f64 = simd_extract!(c, 0); if (k & 0b00000001) != 0 { @@ -37692,7 +39559,8 @@ pub fn _mm_mask3_fmsub_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] -pub fn _mm_mask_fnmadd_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fnmadd_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) 
-> __m128 { unsafe { let mut fnmadd: f32 = simd_extract!(a, 0); if (k & 0b00000001) != 0 { @@ -37712,7 +39580,8 @@ pub fn _mm_mask_fnmadd_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m12 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] -pub fn _mm_maskz_fnmadd_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fnmadd_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { let mut fnmadd: f32 = 0.; if (k & 0b00000001) != 0 { @@ -37733,7 +39602,8 @@ pub fn _mm_maskz_fnmadd_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m1 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] -pub fn _mm_mask3_fnmadd_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fnmadd_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { unsafe { let mut fnmadd: f32 = simd_extract!(c, 0); if (k & 0b00000001) != 0 { @@ -37753,7 +39623,8 @@ pub fn _mm_mask3_fnmadd_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m1 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] -pub fn _mm_mask_fnmadd_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fnmadd_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { unsafe { let mut fnmadd: f64 = simd_extract!(a, 0); if (k & 0b00000001) != 0 { @@ -37773,7 +39644,8 @@ pub fn _mm_mask_fnmadd_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] -pub fn _mm_maskz_fnmadd_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fnmadd_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { let mut fnmadd: f64 = 0.; if (k & 0b00000001) != 0 { @@ -37794,7 +39666,8 @@ pub fn _mm_maskz_fnmadd_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> _ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmadd))] -pub fn _mm_mask3_fnmadd_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fnmadd_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { unsafe { let mut fnmadd: f64 = simd_extract!(c, 0); if (k & 0b00000001) != 0 { @@ -37814,7 +39687,8 @@ pub fn _mm_mask3_fnmadd_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> _ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] -pub fn _mm_mask_fnmsub_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fnmsub_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { unsafe { let mut fnmsub: f32 = simd_extract!(a, 0); if (k & 0b00000001) != 0 { @@ -37835,7 +39709,8 @@ pub fn 
_mm_mask_fnmsub_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m12 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] -pub fn _mm_maskz_fnmsub_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fnmsub_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { let mut fnmsub: f32 = 0.; if (k & 0b00000001) != 0 { @@ -37857,7 +39732,8 @@ pub fn _mm_maskz_fnmsub_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m1 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] -pub fn _mm_mask3_fnmsub_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fnmsub_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { unsafe { let mut fnmsub: f32 = simd_extract!(c, 0); if (k & 0b00000001) != 0 { @@ -37878,7 +39754,8 @@ pub fn _mm_mask3_fnmsub_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m1 #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] -pub fn _mm_mask_fnmsub_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fnmsub_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { unsafe { let mut fnmsub: f64 = simd_extract!(a, 0); if (k & 0b00000001) != 0 { @@ -37899,7 +39776,8 @@ pub fn _mm_mask_fnmsub_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] -pub fn _mm_maskz_fnmsub_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fnmsub_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { let mut fnmsub: f64 = 0.; if (k & 0b00000001) != 0 { @@ -37921,7 +39799,8 @@ pub fn _mm_maskz_fnmsub_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> _ #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vfnmsub))] -pub fn _mm_mask3_fnmsub_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fnmsub_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { unsafe { let mut fnmsub: f64 = simd_extract!(c, 0); if (k & 0b00000001) != 0 { @@ -41501,7 +43380,8 @@ pub fn _mm_cvt_roundu32_ss(a: __m128, b: u32) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtsi2ss))] -pub fn _mm_cvti32_ss(a: __m128, b: i32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvti32_ss(a: __m128, b: i32) -> __m128 { unsafe { let b = b as f32; simd_insert!(a, 0, b) @@ -41515,7 +43395,8 @@ pub fn _mm_cvti32_ss(a: __m128, b: i32) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtsi2sd))] -pub fn _mm_cvti32_sd(a: __m128d, b: i32) -> __m128d { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvti32_sd(a: __m128d, b: i32) -> __m128d { unsafe { let b = b as f64; simd_insert!(a, 0, b) @@ -41675,7 +43556,8 @@ pub fn _mm_cvttsd_u32(a: __m128d) -> u32 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtusi2ss))] -pub fn _mm_cvtu32_ss(a: __m128, b: u32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtu32_ss(a: __m128, b: u32) -> __m128 { unsafe { let b = b as f32; simd_insert!(a, 0, b) @@ -41689,7 +43571,8 @@ pub fn _mm_cvtu32_ss(a: __m128, b: u32) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtusi2sd))] -pub fn _mm_cvtu32_sd(a: __m128d, b: u32) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtu32_sd(a: __m128d, b: u32) -> __m128d { unsafe { let b = b as f64; simd_insert!(a, 0, b) @@ -43141,6 +45024,7 @@ unsafe extern "C" { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; @@ -43149,7 +45033,7 @@ mod tests { use crate::mem::{self}; #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_abs_epi32() { + const unsafe fn test_mm512_abs_epi32() { #[rustfmt::skip] let a = _mm512_setr_epi32( 0, 1, -1, i32::MAX, @@ -43169,7 +45053,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_abs_epi32() { + const unsafe fn test_mm512_mask_abs_epi32() { #[rustfmt::skip] let a = _mm512_setr_epi32( 0, 1, -1, i32::MAX, @@ -43191,7 +45075,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_abs_epi32() { + const unsafe fn test_mm512_maskz_abs_epi32() { #[rustfmt::skip] let a = _mm512_setr_epi32( 0, 1, -1, i32::MAX, @@ -43213,7 +45097,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_abs_epi32() { + const unsafe fn test_mm256_mask_abs_epi32() { #[rustfmt::skip] let a = _mm256_setr_epi32( 0, 1, -1, i32::MAX, @@ -43231,7 +45115,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_abs_epi32() { + const unsafe fn test_mm256_maskz_abs_epi32() { #[rustfmt::skip] let a = _mm256_setr_epi32( 0, 1, -1, i32::MAX, @@ -43249,7 +45133,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_abs_epi32() { + const unsafe fn test_mm_mask_abs_epi32() { let a = _mm_setr_epi32(i32::MIN, 100, -100, -32); let r = _mm_mask_abs_epi32(a, 0, a); assert_eq_m128i(r, a); @@ -43259,7 +45143,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_abs_epi32() { + const unsafe fn test_mm_maskz_abs_epi32() { let a = _mm_setr_epi32(i32::MIN, 100, -100, -32); let r = _mm_maskz_abs_epi32(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -43269,7 +45153,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_abs_ps() { + const unsafe fn test_mm512_abs_ps() { #[rustfmt::skip] let a = _mm512_setr_ps( 0., 1., -1., f32::MAX, @@ -43289,7 +45173,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_abs_ps() { + const unsafe fn test_mm512_mask_abs_ps() { #[rustfmt::skip] let a = _mm512_setr_ps( 0., 1., -1., f32::MAX, @@ -43311,7 +45195,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_mov_epi32() { + const unsafe fn test_mm512_mask_mov_epi32() { let src 
= _mm512_set1_epi32(1); let a = _mm512_set1_epi32(2); let r = _mm512_mask_mov_epi32(src, 0, a); @@ -43321,7 +45205,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_mov_epi32() { + const unsafe fn test_mm512_maskz_mov_epi32() { let a = _mm512_set1_epi32(2); let r = _mm512_maskz_mov_epi32(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -43330,7 +45214,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_mov_epi32() { + const unsafe fn test_mm256_mask_mov_epi32() { let src = _mm256_set1_epi32(1); let a = _mm256_set1_epi32(2); let r = _mm256_mask_mov_epi32(src, 0, a); @@ -43340,7 +45224,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_mov_epi32() { + const unsafe fn test_mm256_maskz_mov_epi32() { let a = _mm256_set1_epi32(2); let r = _mm256_maskz_mov_epi32(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -43349,7 +45233,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_mov_epi32() { + const unsafe fn test_mm_mask_mov_epi32() { let src = _mm_set1_epi32(1); let a = _mm_set1_epi32(2); let r = _mm_mask_mov_epi32(src, 0, a); @@ -43359,7 +45243,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_mov_epi32() { + const unsafe fn test_mm_maskz_mov_epi32() { let a = _mm_set1_epi32(2); let r = _mm_maskz_mov_epi32(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -43368,7 +45252,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_mov_ps() { + const unsafe fn test_mm512_mask_mov_ps() { let src = _mm512_set1_ps(1.); let a = _mm512_set1_ps(2.); let r = _mm512_mask_mov_ps(src, 0, a); @@ -43378,7 +45262,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_mov_ps() { + const unsafe fn test_mm512_maskz_mov_ps() { let a = _mm512_set1_ps(2.); let r = _mm512_maskz_mov_ps(0, a); assert_eq_m512(r, _mm512_setzero_ps()); @@ -43387,7 +45271,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_mov_ps() { + const unsafe fn test_mm256_mask_mov_ps() { let src = _mm256_set1_ps(1.); let a = _mm256_set1_ps(2.); let r = _mm256_mask_mov_ps(src, 0, a); @@ -43397,7 +45281,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_mov_ps() { + const unsafe fn test_mm256_maskz_mov_ps() { let a = _mm256_set1_ps(2.); let r = _mm256_maskz_mov_ps(0, a); assert_eq_m256(r, _mm256_setzero_ps()); @@ -43406,7 +45290,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_mov_ps() { + const unsafe fn test_mm_mask_mov_ps() { let src = _mm_set1_ps(1.); let a = _mm_set1_ps(2.); let r = _mm_mask_mov_ps(src, 0, a); @@ -43416,7 +45300,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_mov_ps() { + const unsafe fn test_mm_maskz_mov_ps() { let a = _mm_set1_ps(2.); let r = _mm_maskz_mov_ps(0, a); assert_eq_m128(r, _mm_setzero_ps()); @@ -43425,7 +45309,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_add_epi32() { + const unsafe fn test_mm512_add_epi32() { #[rustfmt::skip] let a = _mm512_setr_epi32( 0, 1, -1, i32::MAX, @@ -43446,7 +45330,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_add_epi32() { + const unsafe fn test_mm512_mask_add_epi32() { #[rustfmt::skip] let a = _mm512_setr_epi32( 0, 1, -1, i32::MAX, @@ -43469,7 +45353,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_add_epi32() { + 
const unsafe fn test_mm512_maskz_add_epi32() { #[rustfmt::skip] let a = _mm512_setr_epi32( 0, 1, -1, i32::MAX, @@ -43492,7 +45376,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_add_epi32() { + const unsafe fn test_mm256_mask_add_epi32() { let a = _mm256_set_epi32(0, 1, -1, i32::MAX, i32::MIN, 100, -100, -32); let b = _mm256_set1_epi32(1); let r = _mm256_mask_add_epi32(a, 0, a, b); @@ -43503,7 +45387,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_add_epi32() { + const unsafe fn test_mm256_maskz_add_epi32() { let a = _mm256_setr_epi32(0, 1, -1, i32::MAX, i32::MIN, 100, -100, -32); let b = _mm256_set1_epi32(1); let r = _mm256_maskz_add_epi32(0, a, b); @@ -43514,7 +45398,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_add_epi32() { + const unsafe fn test_mm_mask_add_epi32() { let a = _mm_set_epi32(1, -1, i32::MAX, i32::MIN); let b = _mm_set1_epi32(1); let r = _mm_mask_add_epi32(a, 0, a, b); @@ -43525,7 +45409,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_add_epi32() { + const unsafe fn test_mm_maskz_add_epi32() { let a = _mm_setr_epi32(1, -1, i32::MAX, i32::MIN); let b = _mm_set1_epi32(1); let r = _mm_maskz_add_epi32(0, a, b); @@ -43536,7 +45420,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_add_ps() { + const unsafe fn test_mm512_add_ps() { #[rustfmt::skip] let a = _mm512_setr_ps( 0., 1., -1., f32::MAX, @@ -43557,7 +45441,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_add_ps() { + const unsafe fn test_mm512_mask_add_ps() { #[rustfmt::skip] let a = _mm512_setr_ps( 0., 1., -1., f32::MAX, @@ -43580,7 +45464,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_add_ps() { + const unsafe fn test_mm512_maskz_add_ps() { #[rustfmt::skip] let a = _mm512_setr_ps( 0., 1., -1., f32::MAX, @@ -43603,7 +45487,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_add_ps() { + const unsafe fn test_mm256_mask_add_ps() { let a = _mm256_set_ps(0., 1., -1., f32::MAX, f32::MIN, 100., -100., -32.); let b = _mm256_set1_ps(1.); let r = _mm256_mask_add_ps(a, 0, a, b); @@ -43614,7 +45498,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_add_ps() { + const unsafe fn test_mm256_maskz_add_ps() { let a = _mm256_set_ps(0., 1., -1., f32::MAX, f32::MIN, 100., -100., -32.); let b = _mm256_set1_ps(1.); let r = _mm256_maskz_add_ps(0, a, b); @@ -43625,7 +45509,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_add_ps() { + const unsafe fn test_mm_mask_add_ps() { let a = _mm_set_ps(1., -1., f32::MAX, f32::MIN); let b = _mm_set1_ps(1.); let r = _mm_mask_add_ps(a, 0, a, b); @@ -43636,7 +45520,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_add_ps() { + const unsafe fn test_mm_maskz_add_ps() { let a = _mm_set_ps(1., -1., f32::MAX, f32::MIN); let b = _mm_set1_ps(1.); let r = _mm_maskz_add_ps(0, a, b); @@ -43647,7 +45531,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_sub_epi32() { + const unsafe fn test_mm512_sub_epi32() { #[rustfmt::skip] let a = _mm512_setr_epi32( 0, 1, -1, i32::MAX, @@ -43668,7 +45552,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_sub_epi32() { + const unsafe fn test_mm512_mask_sub_epi32() { #[rustfmt::skip] let a = _mm512_setr_epi32( 0, 1, -1, i32::MAX, @@ 
-43691,7 +45575,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_sub_epi32() { + const unsafe fn test_mm512_maskz_sub_epi32() { #[rustfmt::skip] let a = _mm512_setr_epi32( 0, 1, -1, i32::MAX, @@ -43714,7 +45598,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_sub_epi32() { + const unsafe fn test_mm256_mask_sub_epi32() { let a = _mm256_set_epi32(0, 1, -1, i32::MAX, i32::MIN, 100, -100, -32); let b = _mm256_set1_epi32(1); let r = _mm256_mask_sub_epi32(a, 0, a, b); @@ -43725,7 +45609,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_sub_epi32() { + const unsafe fn test_mm256_maskz_sub_epi32() { let a = _mm256_set_epi32(0, 1, -1, i32::MAX, i32::MIN, 100, -100, -32); let b = _mm256_set1_epi32(1); let r = _mm256_maskz_sub_epi32(0, a, b); @@ -43736,7 +45620,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_sub_epi32() { + const unsafe fn test_mm_mask_sub_epi32() { let a = _mm_set_epi32(1, -1, i32::MAX, i32::MIN); let b = _mm_set1_epi32(1); let r = _mm_mask_sub_epi32(a, 0, a, b); @@ -43747,7 +45631,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_sub_epi32() { + const unsafe fn test_mm_maskz_sub_epi32() { let a = _mm_set_epi32(1, -1, i32::MAX, i32::MIN); let b = _mm_set1_epi32(1); let r = _mm_maskz_sub_epi32(0, a, b); @@ -43758,7 +45642,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_sub_ps() { + const unsafe fn test_mm512_sub_ps() { #[rustfmt::skip] let a = _mm512_setr_ps( 0., 1., -1., f32::MAX, @@ -43779,7 +45663,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_sub_ps() { + const unsafe fn test_mm512_mask_sub_ps() { #[rustfmt::skip] let a = _mm512_setr_ps( 0., 1., -1., f32::MAX, @@ -43802,7 +45686,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_sub_ps() { + const unsafe fn test_mm512_maskz_sub_ps() { #[rustfmt::skip] let a = _mm512_setr_ps( 0., 1., -1., f32::MAX, @@ -43825,7 +45709,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_sub_ps() { + const unsafe fn test_mm256_mask_sub_ps() { let a = _mm256_set_ps(0., 1., -1., f32::MAX, f32::MIN, 100., -100., -32.); let b = _mm256_set1_ps(1.); let r = _mm256_mask_sub_ps(a, 0, a, b); @@ -43836,7 +45720,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_sub_ps() { + const unsafe fn test_mm256_maskz_sub_ps() { let a = _mm256_set_ps(0., 1., -1., f32::MAX, f32::MIN, 100., -100., -32.); let b = _mm256_set1_ps(1.); let r = _mm256_maskz_sub_ps(0, a, b); @@ -43847,7 +45731,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_sub_ps() { + const unsafe fn test_mm_mask_sub_ps() { let a = _mm_set_ps(1., -1., f32::MAX, f32::MIN); let b = _mm_set1_ps(1.); let r = _mm_mask_sub_ps(a, 0, a, b); @@ -43858,7 +45742,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_sub_ps() { + const unsafe fn test_mm_maskz_sub_ps() { let a = _mm_set_ps(1., -1., f32::MAX, f32::MIN); let b = _mm_set1_ps(1.); let r = _mm_maskz_sub_ps(0, a, b); @@ -43869,7 +45753,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mullo_epi32() { + const unsafe fn test_mm512_mullo_epi32() { #[rustfmt::skip] let a = _mm512_setr_epi32( 0, 1, -1, i32::MAX, @@ -43886,7 +45770,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_mullo_epi32() { + 
const unsafe fn test_mm512_mask_mullo_epi32() { #[rustfmt::skip] let a = _mm512_setr_epi32( 0, 1, -1, i32::MAX, @@ -43909,7 +45793,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_mullo_epi32() { + const unsafe fn test_mm512_maskz_mullo_epi32() { #[rustfmt::skip] let a = _mm512_setr_epi32( 0, 1, -1, i32::MAX, @@ -43926,7 +45810,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_mullo_epi32() { + const unsafe fn test_mm256_mask_mullo_epi32() { let a = _mm256_set_epi32(0, 1, -1, i32::MAX, i32::MIN, 100, -100, -32); let b = _mm256_set1_epi32(2); let r = _mm256_mask_mullo_epi32(a, 0, a, b); @@ -43937,7 +45821,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_mullo_epi32() { + const unsafe fn test_mm256_maskz_mullo_epi32() { let a = _mm256_set_epi32(0, 1, -1, i32::MAX, i32::MIN, 100, -100, -32); let b = _mm256_set1_epi32(2); let r = _mm256_maskz_mullo_epi32(0, a, b); @@ -43948,7 +45832,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_mullo_epi32() { + const unsafe fn test_mm_mask_mullo_epi32() { let a = _mm_set_epi32(1, -1, i32::MAX, i32::MIN); let b = _mm_set1_epi32(2); let r = _mm_mask_mullo_epi32(a, 0, a, b); @@ -43959,7 +45843,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_mullo_epi32() { + const unsafe fn test_mm_maskz_mullo_epi32() { let a = _mm_set_epi32(1, -1, i32::MAX, i32::MIN); let b = _mm_set1_epi32(2); let r = _mm_maskz_mullo_epi32(0, a, b); @@ -43970,7 +45854,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mul_ps() { + const unsafe fn test_mm512_mul_ps() { #[rustfmt::skip] let a = _mm512_setr_ps( 0., 1., -1., f32::MAX, @@ -43992,7 +45876,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_mul_ps() { + const unsafe fn test_mm512_mask_mul_ps() { #[rustfmt::skip] let a = _mm512_setr_ps( 0., 1., -1., f32::MAX, @@ -44015,7 +45899,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_mul_ps() { + const unsafe fn test_mm512_maskz_mul_ps() { #[rustfmt::skip] let a = _mm512_setr_ps( 0., 1., -1., f32::MAX, @@ -44038,7 +45922,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_mul_ps() { + const unsafe fn test_mm256_mask_mul_ps() { let a = _mm256_set_ps(0., 1., -1., f32::MAX, f32::MIN, 100., -100., -32.); let b = _mm256_set1_ps(2.); let r = _mm256_mask_mul_ps(a, 0, a, b); @@ -44053,7 +45937,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_mul_ps() { + const unsafe fn test_mm256_maskz_mul_ps() { let a = _mm256_set_ps(0., 1., -1., f32::MAX, f32::MIN, 100., -100., -32.); let b = _mm256_set1_ps(2.); let r = _mm256_maskz_mul_ps(0, a, b); @@ -44068,7 +45952,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_mul_ps() { + const unsafe fn test_mm_mask_mul_ps() { let a = _mm_set_ps(1., -1., f32::MAX, f32::MIN); let b = _mm_set1_ps(2.); let r = _mm_mask_mul_ps(a, 0, a, b); @@ -44079,7 +45963,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_mul_ps() { + const unsafe fn test_mm_maskz_mul_ps() { let a = _mm_set_ps(1., -1., f32::MAX, f32::MIN); let b = _mm_set1_ps(2.); let r = _mm_maskz_mul_ps(0, a, b); @@ -44090,7 +45974,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_div_ps() { + const unsafe fn test_mm512_div_ps() { let a = _mm512_setr_ps( 0., 1., -1., -2., 100., 
100., -100., -32., 0., 1., -1., 1000., -131., 100., -100., -32., ); @@ -44109,7 +45993,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_div_ps() { + const unsafe fn test_mm512_mask_div_ps() { let a = _mm512_setr_ps( 0., 1., -1., -2., 100., 100., -100., -32., 0., 1., -1., 1000., -131., 100., -100., -32., ); @@ -44130,7 +46014,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_div_ps() { + const unsafe fn test_mm512_maskz_div_ps() { let a = _mm512_setr_ps( 0., 1., -1., -2., 100., 100., -100., -32., 0., 1., -1., 1000., -131., 100., -100., -32., ); @@ -44151,7 +46035,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_div_ps() { + const unsafe fn test_mm256_mask_div_ps() { let a = _mm256_set_ps(0., 1., -1., -2., 100., 100., -100., -32.); let b = _mm256_set_ps(2., 2., 2., 2., 2., 0., 2., 2.); let r = _mm256_mask_div_ps(a, 0, a, b); @@ -44162,7 +46046,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_div_ps() { + const unsafe fn test_mm256_maskz_div_ps() { let a = _mm256_set_ps(0., 1., -1., -2., 100., 100., -100., -32.); let b = _mm256_set_ps(2., 2., 2., 2., 2., 0., 2., 2.); let r = _mm256_maskz_div_ps(0, a, b); @@ -44173,7 +46057,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_div_ps() { + const unsafe fn test_mm_mask_div_ps() { let a = _mm_set_ps(100., 100., -100., -32.); let b = _mm_set_ps(2., 0., 2., 2.); let r = _mm_mask_div_ps(a, 0, a, b); @@ -44184,7 +46068,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_div_ps() { + const unsafe fn test_mm_maskz_div_ps() { let a = _mm_set_ps(100., 100., -100., -32.); let b = _mm_set_ps(2., 0., 2., 2.); let r = _mm_maskz_div_ps(0, a, b); @@ -44195,7 +46079,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_max_epi32() { + const unsafe fn test_mm512_max_epi32() { let a = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm512_setr_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_max_epi32(a, b); @@ -44204,7 +46088,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_max_epi32() { + const unsafe fn test_mm512_mask_max_epi32() { let a = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm512_setr_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_mask_max_epi32(a, 0, a, b); @@ -44215,7 +46099,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_max_epi32() { + const unsafe fn test_mm512_maskz_max_epi32() { let a = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm512_setr_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_maskz_max_epi32(0, a, b); @@ -44226,7 +46110,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_max_epi32() { + const unsafe fn test_mm256_mask_max_epi32() { let a = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm256_mask_max_epi32(a, 0, a, b); @@ -44237,7 +46121,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_max_epi32() { + const unsafe fn test_mm256_maskz_max_epi32() { let a = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm256_maskz_max_epi32(0, a, b); @@ -44248,7 +46132,7 @@ mod tests { } 
#[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_max_epi32() { + const unsafe fn test_mm_mask_max_epi32() { let a = _mm_set_epi32(0, 1, 2, 3); let b = _mm_set_epi32(3, 2, 1, 0); let r = _mm_mask_max_epi32(a, 0, a, b); @@ -44259,7 +46143,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_max_epi32() { + const unsafe fn test_mm_maskz_max_epi32() { let a = _mm_set_epi32(0, 1, 2, 3); let b = _mm_set_epi32(3, 2, 1, 0); let r = _mm_maskz_max_epi32(0, a, b); @@ -44363,7 +46247,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_max_epu32() { + const unsafe fn test_mm512_max_epu32() { let a = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm512_setr_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_max_epu32(a, b); @@ -44372,7 +46256,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_max_epu32() { + const unsafe fn test_mm512_mask_max_epu32() { let a = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm512_setr_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_mask_max_epu32(a, 0, a, b); @@ -44383,7 +46267,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_max_epu32() { + const unsafe fn test_mm512_maskz_max_epu32() { let a = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm512_setr_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_maskz_max_epu32(0, a, b); @@ -44394,7 +46278,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_max_epu32() { + const unsafe fn test_mm256_mask_max_epu32() { let a = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm256_mask_max_epu32(a, 0, a, b); @@ -44405,7 +46289,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_max_epu32() { + const unsafe fn test_mm256_maskz_max_epu32() { let a = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm256_maskz_max_epu32(0, a, b); @@ -44416,7 +46300,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_max_epu32() { + const unsafe fn test_mm_mask_max_epu32() { let a = _mm_set_epi32(0, 1, 2, 3); let b = _mm_set_epi32(3, 2, 1, 0); let r = _mm_mask_max_epu32(a, 0, a, b); @@ -44427,7 +46311,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_max_epu32() { + const unsafe fn test_mm_maskz_max_epu32() { let a = _mm_set_epi32(0, 1, 2, 3); let b = _mm_set_epi32(3, 2, 1, 0); let r = _mm_maskz_max_epu32(0, a, b); @@ -44438,7 +46322,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_min_epi32() { + const unsafe fn test_mm512_min_epi32() { let a = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm512_setr_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_min_epi32(a, b); @@ -44447,7 +46331,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_min_epi32() { + const unsafe fn test_mm512_mask_min_epi32() { let a = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm512_setr_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_mask_min_epi32(a, 0, a, b); @@ -44458,7 +46342,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn 
test_mm512_maskz_min_epi32() { + const unsafe fn test_mm512_maskz_min_epi32() { let a = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm512_setr_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_maskz_min_epi32(0, a, b); @@ -44469,7 +46353,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_min_epi32() { + const unsafe fn test_mm256_mask_min_epi32() { let a = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm256_mask_min_epi32(a, 0, a, b); @@ -44480,7 +46364,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_min_epi32() { + const unsafe fn test_mm256_maskz_min_epi32() { let a = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm256_maskz_min_epi32(0, a, b); @@ -44491,7 +46375,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_min_epi32() { + const unsafe fn test_mm_mask_min_epi32() { let a = _mm_set_epi32(0, 1, 2, 3); let b = _mm_set_epi32(3, 2, 1, 0); let r = _mm_mask_min_epi32(a, 0, a, b); @@ -44502,7 +46386,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_min_epi32() { + const unsafe fn test_mm_maskz_min_epi32() { let a = _mm_set_epi32(0, 1, 2, 3); let b = _mm_set_epi32(3, 2, 1, 0); let r = _mm_maskz_min_epi32(0, a, b); @@ -44606,7 +46490,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_min_epu32() { + const unsafe fn test_mm512_min_epu32() { let a = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm512_setr_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_min_epu32(a, b); @@ -44615,7 +46499,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_min_epu32() { + const unsafe fn test_mm512_mask_min_epu32() { let a = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm512_setr_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_mask_min_epu32(a, 0, a, b); @@ -44626,7 +46510,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_min_epu32() { + const unsafe fn test_mm512_maskz_min_epu32() { let a = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm512_setr_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_maskz_min_epu32(0, a, b); @@ -44637,7 +46521,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_min_epu32() { + const unsafe fn test_mm256_mask_min_epu32() { let a = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm256_mask_min_epu32(a, 0, a, b); @@ -44648,7 +46532,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_min_epu32() { + const unsafe fn test_mm256_maskz_min_epu32() { let a = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm256_maskz_min_epu32(0, a, b); @@ -44659,7 +46543,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_min_epu32() { + const unsafe fn test_mm_mask_min_epu32() { let a = _mm_set_epi32(0, 1, 2, 3); let b = _mm_set_epi32(3, 2, 1, 0); let r = _mm_mask_min_epu32(a, 0, a, b); @@ -44670,7 +46554,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_min_epu32() { 
+ const unsafe fn test_mm_maskz_min_epu32() { let a = _mm_set_epi32(0, 1, 2, 3); let b = _mm_set_epi32(3, 2, 1, 0); let r = _mm_maskz_min_epu32(0, a, b); @@ -44761,7 +46645,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_fmadd_ps() { + const unsafe fn test_mm512_fmadd_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -44775,7 +46659,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_fmadd_ps() { + const unsafe fn test_mm512_mask_fmadd_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -44791,7 +46675,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_fmadd_ps() { + const unsafe fn test_mm512_maskz_fmadd_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -44807,7 +46691,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask3_fmadd_ps() { + const unsafe fn test_mm512_mask3_fmadd_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -44823,7 +46707,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_fmadd_ps() { + const unsafe fn test_mm256_mask_fmadd_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm256_set1_ps(1.); @@ -44835,7 +46719,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_fmadd_ps() { + const unsafe fn test_mm256_maskz_fmadd_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm256_set1_ps(1.); @@ -44847,7 +46731,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask3_fmadd_ps() { + const unsafe fn test_mm256_mask3_fmadd_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm256_set1_ps(1.); @@ -44859,7 +46743,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_fmadd_ps() { + const unsafe fn test_mm_mask_fmadd_ps() { let a = _mm_set1_ps(1.); let b = _mm_set_ps(0., 1., 2., 3.); let c = _mm_set1_ps(1.); @@ -44871,7 +46755,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_fmadd_ps() { + const unsafe fn test_mm_maskz_fmadd_ps() { let a = _mm_set1_ps(1.); let b = _mm_set_ps(0., 1., 2., 3.); let c = _mm_set1_ps(1.); @@ -44883,7 +46767,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask3_fmadd_ps() { + const unsafe fn test_mm_mask3_fmadd_ps() { let a = _mm_set1_ps(1.); let b = _mm_set_ps(0., 1., 2., 3.); let c = _mm_set1_ps(1.); @@ -44895,7 +46779,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_fmsub_ps() { + const unsafe fn test_mm512_fmsub_ps() { let a = _mm512_setr_ps( 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., ); @@ -44913,7 +46797,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_fmsub_ps() { + const unsafe fn test_mm512_mask_fmsub_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -44929,7 +46813,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_fmsub_ps() { + const unsafe fn test_mm512_maskz_fmsub_ps() { let a = 
_mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -44945,7 +46829,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask3_fmsub_ps() { + const unsafe fn test_mm512_mask3_fmsub_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -44963,7 +46847,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_fmsub_ps() { + const unsafe fn test_mm256_mask_fmsub_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm256_set1_ps(1.); @@ -44975,7 +46859,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_fmsub_ps() { + const unsafe fn test_mm256_maskz_fmsub_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm256_set1_ps(1.); @@ -44987,7 +46871,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask3_fmsub_ps() { + const unsafe fn test_mm256_mask3_fmsub_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm256_set1_ps(1.); @@ -44999,7 +46883,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_fmsub_ps() { + const unsafe fn test_mm_mask_fmsub_ps() { let a = _mm_set1_ps(1.); let b = _mm_set_ps(0., 1., 2., 3.); let c = _mm_set1_ps(1.); @@ -45011,7 +46895,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_fmsub_ps() { + const unsafe fn test_mm_maskz_fmsub_ps() { let a = _mm_set1_ps(1.); let b = _mm_set_ps(0., 1., 2., 3.); let c = _mm_set1_ps(1.); @@ -45023,7 +46907,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask3_fmsub_ps() { + const unsafe fn test_mm_mask3_fmsub_ps() { let a = _mm_set1_ps(1.); let b = _mm_set_ps(0., 1., 2., 3.); let c = _mm_set1_ps(1.); @@ -45035,7 +46919,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_fmaddsub_ps() { + const unsafe fn test_mm512_fmaddsub_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -45049,7 +46933,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_fmaddsub_ps() { + const unsafe fn test_mm512_mask_fmaddsub_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -45065,7 +46949,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_fmaddsub_ps() { + const unsafe fn test_mm512_maskz_fmaddsub_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -45081,7 +46965,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask3_fmaddsub_ps() { + const unsafe fn test_mm512_mask3_fmaddsub_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -45099,7 +46983,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_fmaddsub_ps() { + const unsafe fn test_mm256_mask_fmaddsub_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm256_set1_ps(1.); @@ -45111,7 +46995,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_fmaddsub_ps() { + const unsafe 
fn test_mm256_maskz_fmaddsub_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm256_set1_ps(1.); @@ -45123,7 +47007,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask3_fmaddsub_ps() { + const unsafe fn test_mm256_mask3_fmaddsub_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm256_set1_ps(1.); @@ -45135,7 +47019,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_fmaddsub_ps() { + const unsafe fn test_mm_mask_fmaddsub_ps() { let a = _mm_set1_ps(1.); let b = _mm_set_ps(0., 1., 2., 3.); let c = _mm_set1_ps(1.); @@ -45147,7 +47031,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_fmaddsub_ps() { + const unsafe fn test_mm_maskz_fmaddsub_ps() { let a = _mm_set1_ps(1.); let b = _mm_set_ps(0., 1., 2., 3.); let c = _mm_set1_ps(1.); @@ -45159,7 +47043,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask3_fmaddsub_ps() { + const unsafe fn test_mm_mask3_fmaddsub_ps() { let a = _mm_set1_ps(1.); let b = _mm_set_ps(0., 1., 2., 3.); let c = _mm_set1_ps(1.); @@ -45171,7 +47055,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_fmsubadd_ps() { + const unsafe fn test_mm512_fmsubadd_ps() { let a = _mm512_setr_ps( 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., ); @@ -45189,7 +47073,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_fmsubadd_ps() { + const unsafe fn test_mm512_mask_fmsubadd_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -45205,7 +47089,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_fmsubadd_ps() { + const unsafe fn test_mm512_maskz_fmsubadd_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -45221,7 +47105,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask3_fmsubadd_ps() { + const unsafe fn test_mm512_mask3_fmsubadd_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -45239,7 +47123,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_fmsubadd_ps() { + const unsafe fn test_mm256_mask_fmsubadd_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm256_set1_ps(1.); @@ -45251,7 +47135,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_fmsubadd_ps() { + const unsafe fn test_mm256_maskz_fmsubadd_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm256_set1_ps(1.); @@ -45263,7 +47147,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask3_fmsubadd_ps() { + const unsafe fn test_mm256_mask3_fmsubadd_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm256_set1_ps(1.); @@ -45275,7 +47159,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_fmsubadd_ps() { + const unsafe fn test_mm_mask_fmsubadd_ps() { let a = _mm_set1_ps(1.); let b = _mm_set_ps(0., 1., 2., 3.); let c = _mm_set1_ps(1.); @@ -45287,7 +47171,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_fmsubadd_ps() { + const 
unsafe fn test_mm_maskz_fmsubadd_ps() { let a = _mm_set1_ps(1.); let b = _mm_set_ps(0., 1., 2., 3.); let c = _mm_set1_ps(1.); @@ -45299,7 +47183,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask3_fmsubadd_ps() { + const unsafe fn test_mm_mask3_fmsubadd_ps() { let a = _mm_set1_ps(1.); let b = _mm_set_ps(0., 1., 2., 3.); let c = _mm_set1_ps(1.); @@ -45311,7 +47195,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_fnmadd_ps() { + const unsafe fn test_mm512_fnmadd_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -45325,7 +47209,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_fnmadd_ps() { + const unsafe fn test_mm512_mask_fnmadd_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -45341,7 +47225,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_fnmadd_ps() { + const unsafe fn test_mm512_maskz_fnmadd_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -45357,7 +47241,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask3_fnmadd_ps() { + const unsafe fn test_mm512_mask3_fnmadd_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -45375,7 +47259,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_fnmadd_ps() { + const unsafe fn test_mm256_mask_fnmadd_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm256_set1_ps(1.); @@ -45387,7 +47271,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_fnmadd_ps() { + const unsafe fn test_mm256_maskz_fnmadd_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm256_set1_ps(1.); @@ -45399,7 +47283,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask3_fnmadd_ps() { + const unsafe fn test_mm256_mask3_fnmadd_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm256_set1_ps(1.); @@ -45411,7 +47295,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_fnmadd_ps() { + const unsafe fn test_mm_mask_fnmadd_ps() { let a = _mm_set1_ps(1.); let b = _mm_set_ps(0., 1., 2., 3.); let c = _mm_set1_ps(1.); @@ -45423,7 +47307,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_fnmadd_ps() { + const unsafe fn test_mm_maskz_fnmadd_ps() { let a = _mm_set1_ps(1.); let b = _mm_set_ps(0., 1., 2., 3.); let c = _mm_set1_ps(1.); @@ -45435,7 +47319,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask3_fnmadd_ps() { + const unsafe fn test_mm_mask3_fnmadd_ps() { let a = _mm_set1_ps(1.); let b = _mm_set_ps(0., 1., 2., 3.); let c = _mm_set1_ps(1.); @@ -45447,7 +47331,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_fnmsub_ps() { + const unsafe fn test_mm512_fnmsub_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -45461,7 +47345,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_fnmsub_ps() { + const unsafe fn test_mm512_mask_fnmsub_ps() { let a = 
_mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -45477,7 +47361,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_fnmsub_ps() { + const unsafe fn test_mm512_maskz_fnmsub_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -45493,7 +47377,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask3_fnmsub_ps() { + const unsafe fn test_mm512_mask3_fnmsub_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., @@ -45511,7 +47395,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_fnmsub_ps() { + const unsafe fn test_mm256_mask_fnmsub_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm256_set1_ps(1.); @@ -45523,7 +47407,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_fnmsub_ps() { + const unsafe fn test_mm256_maskz_fnmsub_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm256_set1_ps(1.); @@ -45535,7 +47419,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask3_fnmsub_ps() { + const unsafe fn test_mm256_mask3_fnmsub_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm256_set1_ps(1.); @@ -45547,7 +47431,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_fnmsub_ps() { + const unsafe fn test_mm_mask_fnmsub_ps() { let a = _mm_set1_ps(1.); let b = _mm_set_ps(0., 1., 2., 3.); let c = _mm_set1_ps(1.); @@ -45559,7 +47443,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_fnmsub_ps() { + const unsafe fn test_mm_maskz_fnmsub_ps() { let a = _mm_set1_ps(1.); let b = _mm_set_ps(0., 1., 2., 3.); let c = _mm_set1_ps(1.); @@ -45571,7 +47455,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask3_fnmsub_ps() { + const unsafe fn test_mm_mask3_fnmsub_ps() { let a = _mm_set1_ps(1.); let b = _mm_set_ps(0., 1., 2., 3.); let c = _mm_set1_ps(1.); @@ -47733,7 +49617,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepi8_epi32() { + const unsafe fn test_mm512_cvtepi8_epi32() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepi8_epi32(a); let e = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -47741,7 +49625,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepi8_epi32() { + const unsafe fn test_mm512_mask_cvtepi8_epi32() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm512_set1_epi32(-1); let r = _mm512_mask_cvtepi8_epi32(src, 0, a); @@ -47752,7 +49636,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_cvtepi8_epi32() { + const unsafe fn test_mm512_maskz_cvtepi8_epi32() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_maskz_cvtepi8_epi32(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -47762,7 +49646,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cvtepi8_epi32() { + const unsafe fn test_mm256_mask_cvtepi8_epi32() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = 
_mm256_set1_epi32(-1); let r = _mm256_mask_cvtepi8_epi32(src, 0, a); @@ -47773,7 +49657,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_cvtepi8_epi32() { + const unsafe fn test_mm256_maskz_cvtepi8_epi32() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm256_maskz_cvtepi8_epi32(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -47783,7 +49667,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cvtepi8_epi32() { + const unsafe fn test_mm_mask_cvtepi8_epi32() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm_set1_epi32(-1); let r = _mm_mask_cvtepi8_epi32(src, 0, a); @@ -47794,7 +49678,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_cvtepi8_epi32() { + const unsafe fn test_mm_maskz_cvtepi8_epi32() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm_maskz_cvtepi8_epi32(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -47804,7 +49688,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepu8_epi32() { + const unsafe fn test_mm512_cvtepu8_epi32() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepu8_epi32(a); let e = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -47812,7 +49696,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepu8_epi32() { + const unsafe fn test_mm512_mask_cvtepu8_epi32() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm512_set1_epi32(-1); let r = _mm512_mask_cvtepu8_epi32(src, 0, a); @@ -47823,7 +49707,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_cvtepu8_epi32() { + const unsafe fn test_mm512_maskz_cvtepu8_epi32() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_maskz_cvtepu8_epi32(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -47833,7 +49717,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cvtepu8_epi32() { + const unsafe fn test_mm256_mask_cvtepu8_epi32() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm256_set1_epi32(-1); let r = _mm256_mask_cvtepu8_epi32(src, 0, a); @@ -47844,7 +49728,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_cvtepu8_epi32() { + const unsafe fn test_mm256_maskz_cvtepu8_epi32() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm256_maskz_cvtepu8_epi32(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -47854,7 +49738,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cvtepu8_epi32() { + const unsafe fn test_mm_mask_cvtepu8_epi32() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm_set1_epi32(-1); let r = _mm_mask_cvtepu8_epi32(src, 0, a); @@ -47865,7 +49749,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_cvtepu8_epi32() { + const unsafe fn test_mm_maskz_cvtepu8_epi32() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm_maskz_cvtepu8_epi32(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -47875,7 +49759,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepi16_epi32() { + const unsafe fn test_mm512_cvtepi16_epi32() { 
let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepi16_epi32(a); let e = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -47883,7 +49767,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepi16_epi32() { + const unsafe fn test_mm512_mask_cvtepi16_epi32() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm512_set1_epi32(-1); let r = _mm512_mask_cvtepi16_epi32(src, 0, a); @@ -47894,7 +49778,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_cvtepi16_epi32() { + const unsafe fn test_mm512_maskz_cvtepi16_epi32() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_maskz_cvtepi16_epi32(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -47904,7 +49788,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cvtepi16_epi32() { + const unsafe fn test_mm256_mask_cvtepi16_epi32() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let src = _mm256_set1_epi32(-1); let r = _mm256_mask_cvtepi16_epi32(src, 0, a); @@ -47915,7 +49799,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_cvtepi16_epi32() { + const unsafe fn test_mm256_maskz_cvtepi16_epi32() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let r = _mm256_maskz_cvtepi16_epi32(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -47925,7 +49809,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cvtepi16_epi32() { + const unsafe fn test_mm_mask_cvtepi16_epi32() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let src = _mm_set1_epi32(-1); let r = _mm_mask_cvtepi16_epi32(src, 0, a); @@ -47936,7 +49820,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_cvtepi16_epi32() { + const unsafe fn test_mm_maskz_cvtepi16_epi32() { let a = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); let r = _mm_maskz_cvtepi16_epi32(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -47946,7 +49830,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepu16_epi32() { + const unsafe fn test_mm512_cvtepu16_epi32() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepu16_epi32(a); let e = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -47954,7 +49838,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepu16_epi32() { + const unsafe fn test_mm512_mask_cvtepu16_epi32() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm512_set1_epi32(-1); let r = _mm512_mask_cvtepu16_epi32(src, 0, a); @@ -47965,7 +49849,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_cvtepu16_epi32() { + const unsafe fn test_mm512_maskz_cvtepu16_epi32() { let a = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_maskz_cvtepu16_epi32(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -47975,7 +49859,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cvtepu16_epi32() { + const unsafe fn test_mm256_mask_cvtepu16_epi32() { let a = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); let src = _mm256_set1_epi32(-1); let r = _mm256_mask_cvtepu16_epi32(src, 0, a); @@ -47986,7 +49870,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_cvtepu16_epi32() { 
+ const unsafe fn test_mm256_maskz_cvtepu16_epi32() { let a = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm256_maskz_cvtepu16_epi32(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -47996,7 +49880,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cvtepu16_epi32() { + const unsafe fn test_mm_mask_cvtepu16_epi32() { let a = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); let src = _mm_set1_epi32(-1); let r = _mm_mask_cvtepu16_epi32(src, 0, a); @@ -48007,7 +49891,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_cvtepu16_epi32() { + const unsafe fn test_mm_maskz_cvtepu16_epi32() { let a = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm_maskz_cvtepu16_epi32(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -48017,7 +49901,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepi32_ps() { + const unsafe fn test_mm512_cvtepi32_ps() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepi32_ps(a); let e = _mm512_set_ps( @@ -48027,7 +49911,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepi32_ps() { + const unsafe fn test_mm512_mask_cvtepi32_ps() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm512_set1_ps(-1.); let r = _mm512_mask_cvtepi32_ps(src, 0, a); @@ -48040,7 +49924,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_cvtepi32_ps() { + const unsafe fn test_mm512_maskz_cvtepi32_ps() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_maskz_cvtepi32_ps(0, a); assert_eq_m512(r, _mm512_setzero_ps()); @@ -48052,7 +49936,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cvtepi32_ps() { + const unsafe fn test_mm256_mask_cvtepi32_ps() { let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let src = _mm256_set1_ps(-1.); let r = _mm256_mask_cvtepi32_ps(src, 0, a); @@ -48063,7 +49947,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_cvtepi32_ps() { + const unsafe fn test_mm256_maskz_cvtepi32_ps() { let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm256_maskz_cvtepi32_ps(0, a); assert_eq_m256(r, _mm256_setzero_ps()); @@ -48073,7 +49957,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cvtepi32_ps() { + const unsafe fn test_mm_mask_cvtepi32_ps() { let a = _mm_set_epi32(1, 2, 3, 4); let src = _mm_set1_ps(-1.); let r = _mm_mask_cvtepi32_ps(src, 0, a); @@ -48084,7 +49968,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_cvtepi32_ps() { + const unsafe fn test_mm_maskz_cvtepi32_ps() { let a = _mm_set_epi32(1, 2, 3, 4); let r = _mm_maskz_cvtepi32_ps(0, a); assert_eq_m128(r, _mm_setzero_ps()); @@ -48094,7 +49978,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepu32_ps() { + const unsafe fn test_mm512_cvtepu32_ps() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepu32_ps(a); let e = _mm512_set_ps( @@ -48104,7 +49988,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepu32_ps() { + const unsafe fn test_mm512_mask_cvtepu32_ps() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm512_set1_ps(-1.); let r = _mm512_mask_cvtepu32_ps(src, 0, a); @@ -48117,7 +50001,7 @@ mod tests { } 
#[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_cvtepu32_ps() { + const unsafe fn test_mm512_maskz_cvtepu32_ps() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_maskz_cvtepu32_ps(0, a); assert_eq_m512(r, _mm512_setzero_ps()); @@ -48129,7 +50013,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepi32_epi16() { + const unsafe fn test_mm512_cvtepi32_epi16() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepi32_epi16(a); let e = _mm256_set_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -48137,7 +50021,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepi32_epi16() { + const unsafe fn test_mm512_mask_cvtepi32_epi16() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm256_set1_epi16(-1); let r = _mm512_mask_cvtepi32_epi16(src, 0, a); @@ -48148,7 +50032,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_cvtepi32_epi16() { + const unsafe fn test_mm512_maskz_cvtepi32_epi16() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_maskz_cvtepi32_epi16(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -48158,7 +50042,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cvtepi32_epi16() { + const unsafe fn test_mm256_cvtepi32_epi16() { let a = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7); let r = _mm256_cvtepi32_epi16(a); let e = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); @@ -48166,7 +50050,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cvtepi32_epi16() { + const unsafe fn test_mm256_mask_cvtepi32_epi16() { let a = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7); let src = _mm_set1_epi16(-1); let r = _mm256_mask_cvtepi32_epi16(src, 0, a); @@ -48177,7 +50061,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_cvtepi32_epi16() { + const unsafe fn test_mm256_maskz_cvtepi32_epi16() { let a = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7); let r = _mm256_maskz_cvtepi32_epi16(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -48216,7 +50100,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepi32_epi8() { + const unsafe fn test_mm512_cvtepi32_epi8() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepi32_epi8(a); let e = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); @@ -48224,7 +50108,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepi32_epi8() { + const unsafe fn test_mm512_mask_cvtepi32_epi8() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm_set1_epi8(-1); let r = _mm512_mask_cvtepi32_epi8(src, 0, a); @@ -48235,7 +50119,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_cvtepi32_epi8() { + const unsafe fn test_mm512_maskz_cvtepi32_epi8() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_maskz_cvtepi32_epi8(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -49984,7 +51868,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmplt_epu32_mask() { + const unsafe fn test_mm512_cmplt_epu32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, 
i32::MIN, 100, -100); @@ -49994,7 +51878,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmplt_epu32_mask() { + const unsafe fn test_mm512_mask_cmplt_epu32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); @@ -50005,7 +51889,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmplt_epu32_mask() { + const unsafe fn test_mm256_cmplt_epu32_mask() { let a = _mm256_set_epi32(0, 1, 2, u32::MAX as i32, i32::MAX, 101, 100, 99); let b = _mm256_set1_epi32(1); let r = _mm256_cmplt_epu32_mask(a, b); @@ -50013,7 +51897,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmplt_epu32_mask() { + const unsafe fn test_mm256_mask_cmplt_epu32_mask() { let a = _mm256_set_epi32(0, 1, 2, u32::MAX as i32, i32::MAX, 101, 100, 99); let b = _mm256_set1_epi32(1); let mask = 0b11111111; @@ -50022,7 +51906,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmplt_epu32_mask() { + const unsafe fn test_mm_cmplt_epu32_mask() { let a = _mm_set_epi32(0, 1, 2, u32::MAX as i32); let b = _mm_set1_epi32(1); let r = _mm_cmplt_epu32_mask(a, b); @@ -50030,7 +51914,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmplt_epu32_mask() { + const unsafe fn test_mm_mask_cmplt_epu32_mask() { let a = _mm_set_epi32(0, 1, 2, u32::MAX as i32); let b = _mm_set1_epi32(1); let mask = 0b11111111; @@ -50039,7 +51923,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmpgt_epu32_mask() { + const unsafe fn test_mm512_cmpgt_epu32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); @@ -50049,7 +51933,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmpgt_epu32_mask() { + const unsafe fn test_mm512_mask_cmpgt_epu32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); @@ -50060,7 +51944,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmpgt_epu32_mask() { + const unsafe fn test_mm256_cmpgt_epu32_mask() { let a = _mm256_set_epi32(0, 1, 2, u32::MAX as i32, i32::MAX, 99, 100, 101); let b = _mm256_set1_epi32(1); let r = _mm256_cmpgt_epu32_mask(a, b); @@ -50068,7 +51952,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmpgt_epu32_mask() { + const unsafe fn test_mm256_mask_cmpgt_epu32_mask() { let a = _mm256_set_epi32(0, 1, 2, u32::MAX as i32, i32::MAX, 99, 100, 101); let b = _mm256_set1_epi32(1); let mask = 0b11111111; @@ -50077,7 +51961,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmpgt_epu32_mask() { + const unsafe fn test_mm_cmpgt_epu32_mask() { let a = _mm_set_epi32(0, 1, 2, u32::MAX as i32); let b = _mm_set1_epi32(1); let r = _mm_cmpgt_epu32_mask(a, b); @@ -50085,7 +51969,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmpgt_epu32_mask() { + const unsafe fn test_mm_mask_cmpgt_epu32_mask() { let a = _mm_set_epi32(0, 1, 2, u32::MAX as i32); let b = _mm_set1_epi32(1); let mask = 0b11111111; @@ -50094,7 +51978,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmple_epu32_mask() { + const unsafe fn test_mm512_cmple_epu32_mask() 
{ #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); @@ -50106,7 +51990,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmple_epu32_mask() { + const unsafe fn test_mm512_mask_cmple_epu32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); @@ -50119,7 +52003,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmple_epu32_mask() { + const unsafe fn test_mm256_cmple_epu32_mask() { let a = _mm256_set_epi32(0, 1, 2, u32::MAX as i32, i32::MAX, 200, 100, 101); let b = _mm256_set1_epi32(1); let r = _mm256_cmple_epu32_mask(a, b); @@ -50127,7 +52011,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmple_epu32_mask() { + const unsafe fn test_mm256_mask_cmple_epu32_mask() { let a = _mm256_set_epi32(0, 1, 2, u32::MAX as i32, i32::MAX, 200, 100, 101); let b = _mm256_set1_epi32(1); let mask = 0b11111111; @@ -50136,7 +52020,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmple_epu32_mask() { + const unsafe fn test_mm_cmple_epu32_mask() { let a = _mm_set_epi32(0, 1, 2, u32::MAX as i32); let b = _mm_set1_epi32(1); let r = _mm_cmple_epu32_mask(a, b); @@ -50144,7 +52028,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmple_epu32_mask() { + const unsafe fn test_mm_mask_cmple_epu32_mask() { let a = _mm_set_epi32(0, 1, 2, u32::MAX as i32); let b = _mm_set1_epi32(1); let mask = 0b11111111; @@ -50153,7 +52037,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmpge_epu32_mask() { + const unsafe fn test_mm512_cmpge_epu32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); @@ -50165,7 +52049,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmpge_epu32_mask() { + const unsafe fn test_mm512_mask_cmpge_epu32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); @@ -50175,7 +52059,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmpge_epu32_mask() { + const unsafe fn test_mm256_cmpge_epu32_mask() { let a = _mm256_set_epi32(0, 1, 2, u32::MAX as i32, i32::MAX, 300, 100, 200); let b = _mm256_set1_epi32(1); let r = _mm256_cmpge_epu32_mask(a, b); @@ -50183,7 +52067,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmpge_epu32_mask() { + const unsafe fn test_mm256_mask_cmpge_epu32_mask() { let a = _mm256_set_epi32(0, 1, 2, u32::MAX as i32, i32::MAX, 300, 100, 200); let b = _mm256_set1_epi32(1); let mask = 0b11111111; @@ -50192,7 +52076,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmpge_epu32_mask() { + const unsafe fn test_mm_cmpge_epu32_mask() { let a = _mm_set_epi32(0, 1, 2, u32::MAX as i32); let b = _mm_set1_epi32(1); let r = _mm_cmpge_epu32_mask(a, b); @@ -50200,7 +52084,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmpge_epu32_mask() { + const unsafe fn test_mm_mask_cmpge_epu32_mask() { let a = _mm_set_epi32(0, 1, 2, u32::MAX as i32); let b = _mm_set1_epi32(1); let mask = 0b11111111; @@ -50209,7 
+52093,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmpeq_epu32_mask() { + const unsafe fn test_mm512_cmpeq_epu32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); @@ -50221,7 +52105,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmpeq_epu32_mask() { + const unsafe fn test_mm512_mask_cmpeq_epu32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); @@ -50234,7 +52118,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmpeq_epu32_mask() { + const unsafe fn test_mm256_cmpeq_epu32_mask() { let a = _mm256_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); let b = _mm256_set_epi32(0, 1, 13, 42, i32::MAX, i32::MIN, 100, -100); let m = _mm256_cmpeq_epu32_mask(b, a); @@ -50242,7 +52126,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmpeq_epu32_mask() { + const unsafe fn test_mm256_mask_cmpeq_epu32_mask() { let a = _mm256_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); let b = _mm256_set_epi32(0, 1, 13, 42, i32::MAX, i32::MIN, 100, -100); let mask = 0b01111010; @@ -50251,7 +52135,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmpeq_epu32_mask() { + const unsafe fn test_mm_cmpeq_epu32_mask() { let a = _mm_set_epi32(0, 1, -1, u32::MAX as i32); let b = _mm_set_epi32(0, 1, 13, 42); let m = _mm_cmpeq_epu32_mask(b, a); @@ -50259,7 +52143,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmpeq_epu32_mask() { + const unsafe fn test_mm_mask_cmpeq_epu32_mask() { let a = _mm_set_epi32(0, 1, -1, u32::MAX as i32); let b = _mm_set_epi32(0, 1, 13, 42); let mask = 0b11111111; @@ -50268,7 +52152,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmpneq_epu32_mask() { + const unsafe fn test_mm512_cmpneq_epu32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); @@ -50280,7 +52164,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmpneq_epu32_mask() { + const unsafe fn test_mm512_mask_cmpneq_epu32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, -100, 100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, -100, 100); @@ -50293,7 +52177,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmpneq_epu32_mask() { + const unsafe fn test_mm256_cmpneq_epu32_mask() { let a = _mm256_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, -100, 100); let b = _mm256_set_epi32(0, 1, 13, 42, i32::MAX, i32::MIN, -100, 100); let r = _mm256_cmpneq_epu32_mask(b, a); @@ -50301,7 +52185,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmpneq_epu32_mask() { + const unsafe fn test_mm256_mask_cmpneq_epu32_mask() { let a = _mm256_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, -100, 100); let b = _mm256_set_epi32(0, 1, 13, 42, i32::MAX, i32::MIN, -100, 100); let mask = 0b11111111; @@ -50310,7 +52194,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmpneq_epu32_mask() { + const unsafe fn test_mm_cmpneq_epu32_mask() { let a = _mm_set_epi32(0, 1, -1, 
u32::MAX as i32); let b = _mm_set_epi32(0, 1, 13, 42); let r = _mm_cmpneq_epu32_mask(b, a); @@ -50318,7 +52202,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmpneq_epu32_mask() { + const unsafe fn test_mm_mask_cmpneq_epu32_mask() { let a = _mm_set_epi32(0, 1, -1, u32::MAX as i32); let b = _mm_set_epi32(0, 1, 13, 42); let mask = 0b11111111; @@ -50327,7 +52211,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmp_epu32_mask() { + const unsafe fn test_mm512_cmp_epu32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); @@ -50337,7 +52221,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmp_epu32_mask() { + const unsafe fn test_mm512_mask_cmp_epu32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); @@ -50348,7 +52232,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmp_epu32_mask() { + const unsafe fn test_mm256_cmp_epu32_mask() { let a = _mm256_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); let b = _mm256_set1_epi32(-1); let m = _mm256_cmp_epu32_mask::<_MM_CMPINT_LT>(a, b); @@ -50356,7 +52240,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmp_epu32_mask() { + const unsafe fn test_mm256_mask_cmp_epu32_mask() { let a = _mm256_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); let b = _mm256_set1_epi32(-1); let mask = 0b11111111; @@ -50365,7 +52249,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmp_epu32_mask() { + const unsafe fn test_mm_cmp_epu32_mask() { let a = _mm_set_epi32(0, 1, -1, i32::MAX); let b = _mm_set1_epi32(1); let m = _mm_cmp_epu32_mask::<_MM_CMPINT_LT>(a, b); @@ -50373,7 +52257,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmp_epu32_mask() { + const unsafe fn test_mm_mask_cmp_epu32_mask() { let a = _mm_set_epi32(0, 1, -1, i32::MAX); let b = _mm_set1_epi32(1); let mask = 0b11111111; @@ -50382,7 +52266,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmplt_epi32_mask() { + const unsafe fn test_mm512_cmplt_epi32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); @@ -50392,7 +52276,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmplt_epi32_mask() { + const unsafe fn test_mm512_mask_cmplt_epi32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); @@ -50403,7 +52287,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmplt_epi32_mask() { + const unsafe fn test_mm256_cmplt_epi32_mask() { let a = _mm256_set_epi32(0, 1, -1, 101, i32::MAX, i32::MIN, 100, -100); let b = _mm256_set1_epi32(-1); let r = _mm256_cmplt_epi32_mask(a, b); @@ -50411,7 +52295,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmplt_epi32_mask() { + const unsafe fn test_mm256_mask_cmplt_epi32_mask() { let a = _mm256_set_epi32(0, 1, -1, 101, i32::MAX, i32::MIN, 100, -100); let b = _mm256_set1_epi32(-1); let mask = 0b11111111; @@ -50420,7 +52304,7 @@ mod 
tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmplt_epi32_mask() { + const unsafe fn test_mm_cmplt_epi32_mask() { let a = _mm_set_epi32(i32::MAX, i32::MIN, 100, -100); let b = _mm_set1_epi32(-1); let r = _mm_cmplt_epi32_mask(a, b); @@ -50428,7 +52312,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmplt_epi32_mask() { + const unsafe fn test_mm_mask_cmplt_epi32_mask() { let a = _mm_set_epi32(i32::MAX, i32::MIN, 100, -100); let b = _mm_set1_epi32(-1); let mask = 0b11111111; @@ -50437,7 +52321,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmpgt_epi32_mask() { + const unsafe fn test_mm512_cmpgt_epi32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100); @@ -50447,7 +52331,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmpgt_epi32_mask() { + const unsafe fn test_mm512_mask_cmpgt_epi32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100); @@ -50458,7 +52342,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmpgt_epi32_mask() { + const unsafe fn test_mm256_cmpgt_epi32_mask() { let a = _mm256_set_epi32(0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100); let b = _mm256_set1_epi32(-1); let r = _mm256_cmpgt_epi32_mask(a, b); @@ -50466,7 +52350,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmpgt_epi32_mask() { + const unsafe fn test_mm256_mask_cmpgt_epi32_mask() { let a = _mm256_set_epi32(0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100); let b = _mm256_set1_epi32(-1); let mask = 0b11111111; @@ -50475,7 +52359,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmpgt_epi32_mask() { + const unsafe fn test_mm_cmpgt_epi32_mask() { let a = _mm_set_epi32(0, 1, -1, 13); let b = _mm_set1_epi32(-1); let r = _mm_cmpgt_epi32_mask(a, b); @@ -50483,7 +52367,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmpgt_epi32_mask() { + const unsafe fn test_mm_mask_cmpgt_epi32_mask() { let a = _mm_set_epi32(0, 1, -1, 13); let b = _mm_set1_epi32(-1); let mask = 0b11111111; @@ -50492,7 +52376,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmple_epi32_mask() { + const unsafe fn test_mm512_cmple_epi32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); @@ -50504,7 +52388,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmple_epi32_mask() { + const unsafe fn test_mm512_mask_cmple_epi32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); @@ -50514,7 +52398,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmple_epi32_mask() { + const unsafe fn test_mm256_cmple_epi32_mask() { let a = _mm256_set_epi32(0, 1, -1, 200, i32::MAX, i32::MIN, 100, -100); let b = _mm256_set1_epi32(-1); let r = _mm256_cmple_epi32_mask(a, b); @@ -50522,7 +52406,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmple_epi32_mask() { + const unsafe fn test_mm256_mask_cmple_epi32_mask() { let a = _mm256_set_epi32(0, 1, -1, 200, i32::MAX, i32::MIN, 100, -100); let b = 
_mm256_set1_epi32(-1); let mask = 0b11111111; @@ -50531,7 +52415,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmple_epi32_mask() { + const unsafe fn test_mm_cmple_epi32_mask() { let a = _mm_set_epi32(0, 1, -1, 200); let b = _mm_set1_epi32(-1); let r = _mm_cmple_epi32_mask(a, b); @@ -50539,7 +52423,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmple_epi32_mask() { + const unsafe fn test_mm_mask_cmple_epi32_mask() { let a = _mm_set_epi32(0, 1, -1, 200); let b = _mm_set1_epi32(-1); let mask = 0b11111111; @@ -50548,7 +52432,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmpge_epi32_mask() { + const unsafe fn test_mm512_cmpge_epi32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); @@ -50560,7 +52444,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmpge_epi32_mask() { + const unsafe fn test_mm512_mask_cmpge_epi32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); @@ -50573,7 +52457,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmpge_epi32_mask() { + const unsafe fn test_mm256_cmpge_epi32_mask() { let a = _mm256_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); let b = _mm256_set1_epi32(-1); let r = _mm256_cmpge_epi32_mask(a, b); @@ -50581,7 +52465,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmpge_epi32_mask() { + const unsafe fn test_mm256_mask_cmpge_epi32_mask() { let a = _mm256_set_epi32(0, 1, -1, u32::MAX as i32, i32::MAX, i32::MIN, 100, -100); let b = _mm256_set1_epi32(-1); let mask = 0b11111111; @@ -50590,7 +52474,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmpge_epi32_mask() { + const unsafe fn test_mm_cmpge_epi32_mask() { let a = _mm_set_epi32(0, 1, -1, u32::MAX as i32); let b = _mm_set1_epi32(-1); let r = _mm_cmpge_epi32_mask(a, b); @@ -50598,7 +52482,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmpge_epi32_mask() { + const unsafe fn test_mm_mask_cmpge_epi32_mask() { let a = _mm_set_epi32(0, 1, -1, u32::MAX as i32); let b = _mm_set1_epi32(-1); let mask = 0b11111111; @@ -50607,7 +52491,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmpeq_epi32_mask() { + const unsafe fn test_mm512_cmpeq_epi32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100); @@ -50619,7 +52503,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmpeq_epi32_mask() { + const unsafe fn test_mm512_mask_cmpeq_epi32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100); @@ -50632,7 +52516,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmpeq_epi32_mask() { + const unsafe fn test_mm256_cmpeq_epi32_mask() { let a = _mm256_set_epi32(0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100); let b = _mm256_set_epi32(0, 1, 13, 42, i32::MAX, i32::MIN, 100, -100); let m = _mm256_cmpeq_epi32_mask(b, a); @@ -50640,7 +52524,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmpeq_epi32_mask() { + const 
unsafe fn test_mm256_mask_cmpeq_epi32_mask() { let a = _mm256_set_epi32(0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100); let b = _mm256_set_epi32(0, 1, 13, 42, i32::MAX, i32::MIN, 100, -100); let mask = 0b01111010; @@ -50649,7 +52533,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmpeq_epi32_mask() { + const unsafe fn test_mm_cmpeq_epi32_mask() { let a = _mm_set_epi32(0, 1, -1, 13); let b = _mm_set_epi32(0, 1, 13, 42); let m = _mm_cmpeq_epi32_mask(b, a); @@ -50657,7 +52541,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmpeq_epi32_mask() { + const unsafe fn test_mm_mask_cmpeq_epi32_mask() { let a = _mm_set_epi32(0, 1, -1, 13); let b = _mm_set_epi32(0, 1, 13, 42); let mask = 0b11111111; @@ -50666,7 +52550,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmpneq_epi32_mask() { + const unsafe fn test_mm512_cmpneq_epi32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100); @@ -50678,7 +52562,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmpneq_epi32_mask() { + const unsafe fn test_mm512_mask_cmpneq_epi32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, 13, i32::MAX, i32::MIN, -100, 100, 0, 1, -1, 13, i32::MAX, i32::MIN, -100, 100); @@ -50691,7 +52575,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmpneq_epi32_mask() { + const unsafe fn test_mm256_cmpneq_epi32_mask() { let a = _mm256_set_epi32(0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100); let b = _mm256_set_epi32(0, 1, 13, 42, i32::MAX, i32::MIN, 100, -100); let m = _mm256_cmpneq_epi32_mask(b, a); @@ -50699,7 +52583,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmpneq_epi32_mask() { + const unsafe fn test_mm256_mask_cmpneq_epi32_mask() { let a = _mm256_set_epi32(0, 1, -1, 13, i32::MAX, i32::MIN, -100, 100); let b = _mm256_set_epi32(0, 1, 13, 42, i32::MAX, i32::MIN, 100, -100); let mask = 0b11111111; @@ -50708,7 +52592,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmpneq_epi32_mask() { + const unsafe fn test_mm_cmpneq_epi32_mask() { let a = _mm_set_epi32(0, 1, -1, 13); let b = _mm_set_epi32(0, 1, 13, 42); let r = _mm_cmpneq_epi32_mask(b, a); @@ -50716,7 +52600,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmpneq_epi32_mask() { + const unsafe fn test_mm_mask_cmpneq_epi32_mask() { let a = _mm_set_epi32(0, 1, -1, 13); let b = _mm_set_epi32(0, 1, 13, 42); let mask = 0b11111111; @@ -50725,7 +52609,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmp_epi32_mask() { + const unsafe fn test_mm512_cmp_epi32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100); @@ -50735,7 +52619,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmp_epi32_mask() { + const unsafe fn test_mm512_mask_cmp_epi32_mask() { #[rustfmt::skip] let a = _mm512_set_epi32(0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100, 0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100); @@ -50746,7 +52630,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmp_epi32_mask() { + const unsafe fn test_mm256_cmp_epi32_mask() { let a = _mm256_set_epi32(0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100); let b = _mm256_set1_epi32(-1); let m = 
_mm256_cmp_epi32_mask::<_MM_CMPINT_LT>(a, b); @@ -50754,7 +52638,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmp_epi32_mask() { + const unsafe fn test_mm256_mask_cmp_epi32_mask() { let a = _mm256_set_epi32(0, 1, -1, 13, i32::MAX, i32::MIN, 100, -100); let b = _mm256_set1_epi32(-1); let mask = 0b01100110; @@ -50763,7 +52647,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmp_epi32_mask() { + const unsafe fn test_mm_cmp_epi32_mask() { let a = _mm_set_epi32(0, 1, -1, 13); let b = _mm_set1_epi32(1); let m = _mm_cmp_epi32_mask::<_MM_CMPINT_LT>(a, b); @@ -50771,7 +52655,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmp_epi32_mask() { + const unsafe fn test_mm_mask_cmp_epi32_mask() { let a = _mm_set_epi32(0, 1, -1, 13); let b = _mm_set1_epi32(1); let mask = 0b11111111; @@ -50780,7 +52664,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_set_epi8() { + const unsafe fn test_mm512_set_epi8() { let r = _mm512_set1_epi8(2); assert_eq_m512i( r, @@ -50793,7 +52677,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_set_epi16() { + const unsafe fn test_mm512_set_epi16() { let r = _mm512_set1_epi16(2); assert_eq_m512i( r, @@ -50805,7 +52689,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_set_epi32() { + const unsafe fn test_mm512_set_epi32() { let r = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); assert_eq_m512i( r, @@ -50814,7 +52698,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_setr_epi32() { + const unsafe fn test_mm512_setr_epi32() { let r = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); assert_eq_m512i( r, @@ -50823,7 +52707,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_set1_epi8() { + const unsafe fn test_mm512_set1_epi8() { let r = _mm512_set_epi8( 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, @@ -50833,7 +52717,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_set1_epi16() { + const unsafe fn test_mm512_set1_epi16() { let r = _mm512_set_epi16( 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, @@ -50842,23 +52726,23 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_set1_epi32() { + const unsafe fn test_mm512_set1_epi32() { let r = _mm512_set_epi32(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2); assert_eq_m512i(r, _mm512_set1_epi32(2)); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_setzero_si512() { + const unsafe fn test_mm512_setzero_si512() { assert_eq_m512i(_mm512_set1_epi32(0), _mm512_setzero_si512()); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_setzero_epi32() { + const unsafe fn test_mm512_setzero_epi32() { assert_eq_m512i(_mm512_set1_epi32(0), _mm512_setzero_epi32()); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_set_ps() { + const unsafe fn test_mm512_set_ps() { let r = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., ); @@ -50871,7 +52755,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_setr_ps() { + const unsafe fn test_mm512_setr_ps() { let r = _mm512_set_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., ); @@ -50884,7 +52768,7 @@ mod tests { } 
#[simd_test(enable = "avx512f")] - unsafe fn test_mm512_set1_ps() { + const unsafe fn test_mm512_set1_ps() { #[rustfmt::skip] let expected = _mm512_set_ps(2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.); @@ -50892,13 +52776,13 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_set4_epi32() { + const unsafe fn test_mm512_set4_epi32() { let r = _mm512_set_epi32(4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1); assert_eq_m512i(r, _mm512_set4_epi32(4, 3, 2, 1)); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_set4_ps() { + const unsafe fn test_mm512_set4_ps() { let r = _mm512_set_ps( 4., 3., 2., 1., 4., 3., 2., 1., 4., 3., 2., 1., 4., 3., 2., 1., ); @@ -50906,13 +52790,13 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_setr4_epi32() { + const unsafe fn test_mm512_setr4_epi32() { let r = _mm512_set_epi32(4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1, 4, 3, 2, 1); assert_eq_m512i(r, _mm512_setr4_epi32(1, 2, 3, 4)); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_setr4_ps() { + const unsafe fn test_mm512_setr4_ps() { let r = _mm512_set_ps( 4., 3., 2., 1., 4., 3., 2., 1., 4., 3., 2., 1., 4., 3., 2., 1., ); @@ -50920,17 +52804,17 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_setzero_ps() { + const unsafe fn test_mm512_setzero_ps() { assert_eq_m512(_mm512_setzero_ps(), _mm512_set1_ps(0.)); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_setzero() { + const unsafe fn test_mm512_setzero() { assert_eq_m512(_mm512_setzero(), _mm512_set1_ps(0.)); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_loadu_pd() { + const unsafe fn test_mm512_loadu_pd() { let a = &[4., 3., 2., 5., 8., 9., 64., 50.]; let p = a.as_ptr(); let r = _mm512_loadu_pd(black_box(p)); @@ -50939,7 +52823,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_storeu_pd() { + const unsafe fn test_mm512_storeu_pd() { let a = _mm512_set1_pd(9.); let mut r = _mm512_undefined_pd(); _mm512_storeu_pd(&mut r as *mut _ as *mut f64, a); @@ -50947,7 +52831,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_loadu_ps() { + const unsafe fn test_mm512_loadu_ps() { let a = &[ 4., 3., 2., 5., 8., 9., 64., 50., -4., -3., -2., -5., -8., -9., -64., -50., ]; @@ -50960,7 +52844,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_storeu_ps() { + const unsafe fn test_mm512_storeu_ps() { let a = _mm512_set1_ps(9.); let mut r = _mm512_undefined_ps(); _mm512_storeu_ps(&mut r as *mut _ as *mut f32, a); @@ -50968,7 +52852,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_loadu_epi32() { + const unsafe fn test_mm512_mask_loadu_epi32() { let src = _mm512_set1_epi32(42); let a = &[1_i32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; let p = a.as_ptr(); @@ -50979,7 +52863,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_loadu_epi32() { + const unsafe fn test_mm512_maskz_loadu_epi32() { let a = &[1_i32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; let p = a.as_ptr(); let m = 0b11101000_11001010; @@ -50989,7 +52873,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_load_epi32() { + const unsafe fn test_mm512_mask_load_epi32() { #[repr(align(64))] struct Align { data: [i32; 16], // 64 bytes @@ -51006,7 +52890,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_load_epi32() { + const unsafe fn test_mm512_maskz_load_epi32() { #[repr(align(64))] struct Align { data: 
[i32; 16], // 64 bytes @@ -51022,7 +52906,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_storeu_epi32() { + const unsafe fn test_mm512_mask_storeu_epi32() { let mut r = [42_i32; 16]; let a = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let m = 0b11101000_11001010; @@ -51032,7 +52916,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_store_epi32() { + const unsafe fn test_mm512_mask_store_epi32() { #[repr(align(64))] struct Align { data: [i32; 16], @@ -51046,7 +52930,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_loadu_epi64() { + const unsafe fn test_mm512_mask_loadu_epi64() { let src = _mm512_set1_epi64(42); let a = &[1_i64, 2, 3, 4, 5, 6, 7, 8]; let p = a.as_ptr(); @@ -51057,7 +52941,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_loadu_epi64() { + const unsafe fn test_mm512_maskz_loadu_epi64() { let a = &[1_i64, 2, 3, 4, 5, 6, 7, 8]; let p = a.as_ptr(); let m = 0b11001010; @@ -51067,7 +52951,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_load_epi64() { + const unsafe fn test_mm512_mask_load_epi64() { #[repr(align(64))] struct Align { data: [i64; 8], // 64 bytes @@ -51084,7 +52968,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_load_epi64() { + const unsafe fn test_mm512_maskz_load_epi64() { #[repr(align(64))] struct Align { data: [i64; 8], // 64 bytes @@ -51100,7 +52984,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_storeu_epi64() { + const unsafe fn test_mm512_mask_storeu_epi64() { let mut r = [42_i64; 8]; let a = _mm512_setr_epi64(1, 2, 3, 4, 5, 6, 7, 8); let m = 0b11001010; @@ -51110,7 +52994,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_store_epi64() { + const unsafe fn test_mm512_mask_store_epi64() { #[repr(align(64))] struct Align { data: [i64; 8], @@ -51125,7 +53009,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_loadu_ps() { + const unsafe fn test_mm512_mask_loadu_ps() { let src = _mm512_set1_ps(42.0); let a = &[ 1.0_f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, @@ -51142,7 +53026,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_loadu_ps() { + const unsafe fn test_mm512_maskz_loadu_ps() { let a = &[ 1.0_f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, @@ -51157,7 +53041,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_load_ps() { + const unsafe fn test_mm512_mask_load_ps() { #[repr(align(64))] struct Align { data: [f32; 16], // 64 bytes @@ -51180,7 +53064,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_load_ps() { + const unsafe fn test_mm512_maskz_load_ps() { #[repr(align(64))] struct Align { data: [f32; 16], // 64 bytes @@ -51201,7 +53085,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_storeu_ps() { + const unsafe fn test_mm512_mask_storeu_ps() { let mut r = [42_f32; 16]; let a = _mm512_setr_ps( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, @@ -51216,7 +53100,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_store_ps() { + const unsafe fn test_mm512_mask_store_ps() { #[repr(align(64))] struct Align { data: [f32; 16], @@ -51235,7 +53119,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - 
unsafe fn test_mm512_mask_loadu_pd() { + const unsafe fn test_mm512_mask_loadu_pd() { let src = _mm512_set1_pd(42.0); let a = &[1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; let p = a.as_ptr(); @@ -51246,7 +53130,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_loadu_pd() { + const unsafe fn test_mm512_maskz_loadu_pd() { let a = &[1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; let p = a.as_ptr(); let m = 0b11001010; @@ -51256,7 +53140,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_load_pd() { + const unsafe fn test_mm512_mask_load_pd() { #[repr(align(64))] struct Align { data: [f64; 8], // 64 bytes @@ -51273,7 +53157,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_load_pd() { + const unsafe fn test_mm512_maskz_load_pd() { #[repr(align(64))] struct Align { data: [f64; 8], // 64 bytes @@ -51289,7 +53173,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_storeu_pd() { + const unsafe fn test_mm512_mask_storeu_pd() { let mut r = [42_f64; 8]; let a = _mm512_setr_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let m = 0b11001010; @@ -51299,7 +53183,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_store_pd() { + const unsafe fn test_mm512_mask_store_pd() { #[repr(align(64))] struct Align { data: [f64; 8], @@ -51313,7 +53197,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_loadu_epi32() { + const unsafe fn test_mm256_mask_loadu_epi32() { let src = _mm256_set1_epi32(42); let a = &[1_i32, 2, 3, 4, 5, 6, 7, 8]; let p = a.as_ptr(); @@ -51324,7 +53208,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_loadu_epi32() { + const unsafe fn test_mm256_maskz_loadu_epi32() { let a = &[1_i32, 2, 3, 4, 5, 6, 7, 8]; let p = a.as_ptr(); let m = 0b11001010; @@ -51334,7 +53218,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_load_epi32() { + const unsafe fn test_mm256_mask_load_epi32() { #[repr(align(32))] struct Align { data: [i32; 8], // 32 bytes @@ -51351,7 +53235,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_load_epi32() { + const unsafe fn test_mm256_maskz_load_epi32() { #[repr(align(32))] struct Align { data: [i32; 8], // 32 bytes @@ -51367,7 +53251,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_storeu_epi32() { + const unsafe fn test_mm256_mask_storeu_epi32() { let mut r = [42_i32; 8]; let a = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8); let m = 0b11001010; @@ -51377,7 +53261,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_store_epi32() { + const unsafe fn test_mm256_mask_store_epi32() { #[repr(align(64))] struct Align { data: [i32; 8], @@ -51391,7 +53275,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_loadu_epi64() { + const unsafe fn test_mm256_mask_loadu_epi64() { let src = _mm256_set1_epi64x(42); let a = &[1_i64, 2, 3, 4]; let p = a.as_ptr(); @@ -51402,7 +53286,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_loadu_epi64() { + const unsafe fn test_mm256_maskz_loadu_epi64() { let a = &[1_i64, 2, 3, 4]; let p = a.as_ptr(); let m = 0b1010; @@ -51412,7 +53296,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_load_epi64() { + const unsafe fn test_mm256_mask_load_epi64() { #[repr(align(32))] struct Align { 
data: [i64; 4], // 32 bytes @@ -51429,7 +53313,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_load_epi64() { + const unsafe fn test_mm256_maskz_load_epi64() { #[repr(align(32))] struct Align { data: [i64; 4], // 32 bytes @@ -51445,7 +53329,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_storeu_epi64() { + const unsafe fn test_mm256_mask_storeu_epi64() { let mut r = [42_i64; 4]; let a = _mm256_setr_epi64x(1, 2, 3, 4); let m = 0b1010; @@ -51455,7 +53339,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_store_epi64() { + const unsafe fn test_mm256_mask_store_epi64() { #[repr(align(32))] struct Align { data: [i64; 4], @@ -51469,7 +53353,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_loadu_ps() { + const unsafe fn test_mm256_mask_loadu_ps() { let src = _mm256_set1_ps(42.0); let a = &[1.0_f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; let p = a.as_ptr(); @@ -51480,7 +53364,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_loadu_ps() { + const unsafe fn test_mm256_maskz_loadu_ps() { let a = &[1.0_f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; let p = a.as_ptr(); let m = 0b11001010; @@ -51490,7 +53374,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_load_ps() { + const unsafe fn test_mm256_mask_load_ps() { #[repr(align(32))] struct Align { data: [f32; 8], // 32 bytes @@ -51507,7 +53391,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_load_ps() { + const unsafe fn test_mm256_maskz_load_ps() { #[repr(align(32))] struct Align { data: [f32; 8], // 32 bytes @@ -51523,7 +53407,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_storeu_ps() { + const unsafe fn test_mm256_mask_storeu_ps() { let mut r = [42_f32; 8]; let a = _mm256_setr_ps(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let m = 0b11001010; @@ -51533,7 +53417,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_store_ps() { + const unsafe fn test_mm256_mask_store_ps() { #[repr(align(32))] struct Align { data: [f32; 8], @@ -51547,7 +53431,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_loadu_pd() { + const unsafe fn test_mm256_mask_loadu_pd() { let src = _mm256_set1_pd(42.0); let a = &[1.0_f64, 2.0, 3.0, 4.0]; let p = a.as_ptr(); @@ -51558,7 +53442,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_loadu_pd() { + const unsafe fn test_mm256_maskz_loadu_pd() { let a = &[1.0_f64, 2.0, 3.0, 4.0]; let p = a.as_ptr(); let m = 0b1010; @@ -51568,7 +53452,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_load_pd() { + const unsafe fn test_mm256_mask_load_pd() { #[repr(align(32))] struct Align { data: [f64; 4], // 32 bytes @@ -51585,7 +53469,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_load_pd() { + const unsafe fn test_mm256_maskz_load_pd() { #[repr(align(32))] struct Align { data: [f64; 4], // 32 bytes @@ -51601,7 +53485,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_storeu_pd() { + const unsafe fn test_mm256_mask_storeu_pd() { let mut r = [42_f64; 4]; let a = _mm256_setr_pd(1.0, 2.0, 3.0, 4.0); let m = 0b1010; @@ -51611,7 +53495,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn 
test_mm256_mask_store_pd() { + const unsafe fn test_mm256_mask_store_pd() { #[repr(align(32))] struct Align { data: [f64; 4], @@ -51625,7 +53509,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_loadu_epi32() { + const unsafe fn test_mm_mask_loadu_epi32() { let src = _mm_set1_epi32(42); let a = &[1_i32, 2, 3, 4]; let p = a.as_ptr(); @@ -51636,7 +53520,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_loadu_epi32() { + const unsafe fn test_mm_maskz_loadu_epi32() { let a = &[1_i32, 2, 3, 4]; let p = a.as_ptr(); let m = 0b1010; @@ -51646,7 +53530,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_load_epi32() { + const unsafe fn test_mm_mask_load_epi32() { #[repr(align(16))] struct Align { data: [i32; 4], // 32 bytes @@ -51663,7 +53547,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_load_epi32() { + const unsafe fn test_mm_maskz_load_epi32() { #[repr(align(16))] struct Align { data: [i32; 4], // 16 bytes @@ -51679,7 +53563,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_storeu_epi32() { + const unsafe fn test_mm_mask_storeu_epi32() { let mut r = [42_i32; 4]; let a = _mm_setr_epi32(1, 2, 3, 4); let m = 0b1010; @@ -51689,7 +53573,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_store_epi32() { + const unsafe fn test_mm_mask_store_epi32() { #[repr(align(16))] struct Align { data: [i32; 4], // 16 bytes @@ -51703,7 +53587,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_loadu_epi64() { + const unsafe fn test_mm_mask_loadu_epi64() { let src = _mm_set1_epi64x(42); let a = &[1_i64, 2]; let p = a.as_ptr(); @@ -51714,7 +53598,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_loadu_epi64() { + const unsafe fn test_mm_maskz_loadu_epi64() { let a = &[1_i64, 2]; let p = a.as_ptr(); let m = 0b10; @@ -51724,7 +53608,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_load_epi64() { + const unsafe fn test_mm_mask_load_epi64() { #[repr(align(16))] struct Align { data: [i64; 2], // 16 bytes @@ -51739,7 +53623,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_load_epi64() { + const unsafe fn test_mm_maskz_load_epi64() { #[repr(align(16))] struct Align { data: [i64; 2], // 16 bytes @@ -51753,7 +53637,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_storeu_epi64() { + const unsafe fn test_mm_mask_storeu_epi64() { let mut r = [42_i64; 2]; let a = _mm_setr_epi64x(1, 2); let m = 0b10; @@ -51763,7 +53647,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_store_epi64() { + const unsafe fn test_mm_mask_store_epi64() { #[repr(align(16))] struct Align { data: [i64; 2], // 16 bytes @@ -51777,7 +53661,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_loadu_ps() { + const unsafe fn test_mm_mask_loadu_ps() { let src = _mm_set1_ps(42.0); let a = &[1.0_f32, 2.0, 3.0, 4.0]; let p = a.as_ptr(); @@ -51788,7 +53672,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_loadu_ps() { + const unsafe fn test_mm_maskz_loadu_ps() { let a = &[1.0_f32, 2.0, 3.0, 4.0]; let p = a.as_ptr(); let m = 0b1010; @@ -51798,7 +53682,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_load_ps() { + const unsafe fn 
test_mm_mask_load_ps() { #[repr(align(16))] struct Align { data: [f32; 4], // 16 bytes @@ -51815,7 +53699,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_load_ps() { + const unsafe fn test_mm_maskz_load_ps() { #[repr(align(16))] struct Align { data: [f32; 4], // 16 bytes @@ -51831,7 +53715,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_storeu_ps() { + const unsafe fn test_mm_mask_storeu_ps() { let mut r = [42_f32; 4]; let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); let m = 0b1010; @@ -51841,7 +53725,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_store_ps() { + const unsafe fn test_mm_mask_store_ps() { #[repr(align(16))] struct Align { data: [f32; 4], // 16 bytes @@ -51855,7 +53739,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_loadu_pd() { + const unsafe fn test_mm_mask_loadu_pd() { let src = _mm_set1_pd(42.0); let a = &[1.0_f64, 2.0]; let p = a.as_ptr(); @@ -51866,7 +53750,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_loadu_pd() { + const unsafe fn test_mm_maskz_loadu_pd() { let a = &[1.0_f64, 2.0]; let p = a.as_ptr(); let m = 0b10; @@ -51876,7 +53760,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_load_pd() { + const unsafe fn test_mm_mask_load_pd() { #[repr(align(16))] struct Align { data: [f64; 2], // 16 bytes @@ -51893,7 +53777,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_load_pd() { + const unsafe fn test_mm_maskz_load_pd() { #[repr(align(16))] struct Align { data: [f64; 2], // 16 bytes @@ -51963,7 +53847,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_storeu_pd() { + const unsafe fn test_mm_mask_storeu_pd() { let mut r = [42_f64; 2]; let a = _mm_setr_pd(1.0, 2.0); let m = 0b10; @@ -51973,7 +53857,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_store_pd() { + const unsafe fn test_mm_mask_store_pd() { #[repr(align(16))] struct Align { data: [f64; 2], // 16 bytes @@ -52015,19 +53899,19 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_setr_pd() { + const unsafe fn test_mm512_setr_pd() { let r = _mm512_set_pd(0., 1., 2., 3., 4., 5., 6., 7.); assert_eq_m512d(r, _mm512_setr_pd(7., 6., 5., 4., 3., 2., 1., 0.)); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_set_pd() { + const unsafe fn test_mm512_set_pd() { let r = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); assert_eq_m512d(r, _mm512_set_pd(7., 6., 5., 4., 3., 2., 1., 0.)); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_rol_epi32() { + const unsafe fn test_mm512_rol_epi32() { let a = _mm512_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1); let r = _mm512_rol_epi32::<1>(a); let e = _mm512_set_epi32(1 << 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2); @@ -52035,7 +53919,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_rol_epi32() { + const unsafe fn test_mm512_mask_rol_epi32() { let a = _mm512_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1); let r = _mm512_mask_rol_epi32::<1>(a, 0, a); assert_eq_m512i(r, a); @@ -52045,7 +53929,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_rol_epi32() { + const unsafe fn test_mm512_maskz_rol_epi32() { let a = _mm512_set_epi32(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 << 31); let r = _mm512_maskz_rol_epi32::<1>(0, a); 
assert_eq_m512i(r, _mm512_setzero_si512()); @@ -52055,7 +53939,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_rol_epi32() { + const unsafe fn test_mm256_rol_epi32() { let a = _mm256_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1); let r = _mm256_rol_epi32::<1>(a); let e = _mm256_set_epi32(1 << 0, 2, 2, 2, 2, 2, 2, 2); @@ -52063,7 +53947,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_rol_epi32() { + const unsafe fn test_mm256_mask_rol_epi32() { let a = _mm256_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1); let r = _mm256_mask_rol_epi32::<1>(a, 0, a); assert_eq_m256i(r, a); @@ -52073,7 +53957,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_rol_epi32() { + const unsafe fn test_mm256_maskz_rol_epi32() { let a = _mm256_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1); let r = _mm256_maskz_rol_epi32::<1>(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -52083,7 +53967,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_rol_epi32() { + const unsafe fn test_mm_rol_epi32() { let a = _mm_set_epi32(1 << 31, 1, 1, 1); let r = _mm_rol_epi32::<1>(a); let e = _mm_set_epi32(1 << 0, 2, 2, 2); @@ -52091,7 +53975,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_rol_epi32() { + const unsafe fn test_mm_mask_rol_epi32() { let a = _mm_set_epi32(1 << 31, 1, 1, 1); let r = _mm_mask_rol_epi32::<1>(a, 0, a); assert_eq_m128i(r, a); @@ -52101,7 +53985,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_rol_epi32() { + const unsafe fn test_mm_maskz_rol_epi32() { let a = _mm_set_epi32(1 << 31, 1, 1, 1); let r = _mm_maskz_rol_epi32::<1>(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -52111,7 +53995,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_ror_epi32() { + const unsafe fn test_mm512_ror_epi32() { let a = _mm512_set_epi32(1 << 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2); let r = _mm512_ror_epi32::<1>(a); let e = _mm512_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1); @@ -52119,7 +54003,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_ror_epi32() { + const unsafe fn test_mm512_mask_ror_epi32() { let a = _mm512_set_epi32(1 << 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2); let r = _mm512_mask_ror_epi32::<1>(a, 0, a); assert_eq_m512i(r, a); @@ -52129,7 +54013,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_ror_epi32() { + const unsafe fn test_mm512_maskz_ror_epi32() { let a = _mm512_set_epi32(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1 << 0); let r = _mm512_maskz_ror_epi32::<1>(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -52139,7 +54023,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_ror_epi32() { + const unsafe fn test_mm256_ror_epi32() { let a = _mm256_set_epi32(1 << 0, 2, 2, 2, 2, 2, 2, 2); let r = _mm256_ror_epi32::<1>(a); let e = _mm256_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1); @@ -52147,7 +54031,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_ror_epi32() { + const unsafe fn test_mm256_mask_ror_epi32() { let a = _mm256_set_epi32(1 << 0, 2, 2, 2, 2, 2, 2, 2); let r = _mm256_mask_ror_epi32::<1>(a, 0, a); assert_eq_m256i(r, a); @@ -52157,7 +54041,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_ror_epi32() { + const unsafe fn test_mm256_maskz_ror_epi32() { let a = 
_mm256_set_epi32(1 << 0, 2, 2, 2, 2, 2, 2, 2); let r = _mm256_maskz_ror_epi32::<1>(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -52167,7 +54051,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_ror_epi32() { + const unsafe fn test_mm_ror_epi32() { let a = _mm_set_epi32(1 << 0, 2, 2, 2); let r = _mm_ror_epi32::<1>(a); let e = _mm_set_epi32(1 << 31, 1, 1, 1); @@ -52175,7 +54059,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_ror_epi32() { + const unsafe fn test_mm_mask_ror_epi32() { let a = _mm_set_epi32(1 << 0, 2, 2, 2); let r = _mm_mask_ror_epi32::<1>(a, 0, a); assert_eq_m128i(r, a); @@ -52185,7 +54069,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_ror_epi32() { + const unsafe fn test_mm_maskz_ror_epi32() { let a = _mm_set_epi32(1 << 0, 2, 2, 2); let r = _mm_maskz_ror_epi32::<1>(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -52195,7 +54079,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_slli_epi32() { + const unsafe fn test_mm512_slli_epi32() { let a = _mm512_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1); let r = _mm512_slli_epi32::<1>(a); let e = _mm512_set_epi32(0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2); @@ -52203,7 +54087,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_slli_epi32() { + const unsafe fn test_mm512_mask_slli_epi32() { let a = _mm512_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1); let r = _mm512_mask_slli_epi32::<1>(a, 0, a); assert_eq_m512i(r, a); @@ -52213,7 +54097,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_slli_epi32() { + const unsafe fn test_mm512_maskz_slli_epi32() { let a = _mm512_set_epi32(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 << 31); let r = _mm512_maskz_slli_epi32::<1>(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -52223,7 +54107,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_slli_epi32() { + const unsafe fn test_mm256_mask_slli_epi32() { let a = _mm256_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1); let r = _mm256_mask_slli_epi32::<1>(a, 0, a); assert_eq_m256i(r, a); @@ -52233,7 +54117,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_slli_epi32() { + const unsafe fn test_mm256_maskz_slli_epi32() { let a = _mm256_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1); let r = _mm256_maskz_slli_epi32::<1>(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -52243,7 +54127,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_slli_epi32() { + const unsafe fn test_mm_mask_slli_epi32() { let a = _mm_set_epi32(1 << 31, 1, 1, 1); let r = _mm_mask_slli_epi32::<1>(a, 0, a); assert_eq_m128i(r, a); @@ -52253,7 +54137,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_slli_epi32() { + const unsafe fn test_mm_maskz_slli_epi32() { let a = _mm_set_epi32(1 << 31, 1, 1, 1); let r = _mm_maskz_slli_epi32::<1>(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -52263,7 +54147,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_srli_epi32() { + const unsafe fn test_mm512_srli_epi32() { let a = _mm512_set_epi32(0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2); let r = _mm512_srli_epi32::<1>(a); let e = _mm512_set_epi32(0 << 31, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1); @@ -52271,7 +54155,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn 
test_mm512_mask_srli_epi32() { + const unsafe fn test_mm512_mask_srli_epi32() { let a = _mm512_set_epi32(0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2); let r = _mm512_mask_srli_epi32::<1>(a, 0, a); assert_eq_m512i(r, a); @@ -52281,7 +54165,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_srli_epi32() { + const unsafe fn test_mm512_maskz_srli_epi32() { let a = _mm512_set_epi32(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0); let r = _mm512_maskz_srli_epi32::<1>(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -52291,7 +54175,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_srli_epi32() { + const unsafe fn test_mm256_mask_srli_epi32() { let a = _mm256_set_epi32(1 << 5, 0, 0, 0, 0, 0, 0, 0); let r = _mm256_mask_srli_epi32::<1>(a, 0, a); assert_eq_m256i(r, a); @@ -52301,7 +54185,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_srli_epi32() { + const unsafe fn test_mm256_maskz_srli_epi32() { let a = _mm256_set_epi32(1 << 5, 0, 0, 0, 0, 0, 0, 0); let r = _mm256_maskz_srli_epi32::<1>(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -52311,7 +54195,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_srli_epi32() { + const unsafe fn test_mm_mask_srli_epi32() { let a = _mm_set_epi32(1 << 5, 0, 0, 0); let r = _mm_mask_srli_epi32::<1>(a, 0, a); assert_eq_m128i(r, a); @@ -52321,7 +54205,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_srli_epi32() { + const unsafe fn test_mm_maskz_srli_epi32() { let a = _mm_set_epi32(1 << 5, 0, 0, 0); let r = _mm_maskz_srli_epi32::<1>(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -52331,7 +54215,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_rolv_epi32() { + const unsafe fn test_mm512_rolv_epi32() { let a = _mm512_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1); let b = _mm512_set1_epi32(1); let r = _mm512_rolv_epi32(a, b); @@ -52340,7 +54224,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_rolv_epi32() { + const unsafe fn test_mm512_mask_rolv_epi32() { let a = _mm512_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1); let b = _mm512_set1_epi32(1); let r = _mm512_mask_rolv_epi32(a, 0, a, b); @@ -52351,7 +54235,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_rolv_epi32() { + const unsafe fn test_mm512_maskz_rolv_epi32() { let a = _mm512_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 << 31); let b = _mm512_set1_epi32(1); let r = _mm512_maskz_rolv_epi32(0, a, b); @@ -52362,7 +54246,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_rolv_epi32() { + const unsafe fn test_mm256_rolv_epi32() { let a = _mm256_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1); let b = _mm256_set1_epi32(1); let r = _mm256_rolv_epi32(a, b); @@ -52371,7 +54255,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_rolv_epi32() { + const unsafe fn test_mm256_mask_rolv_epi32() { let a = _mm256_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1); let b = _mm256_set1_epi32(1); let r = _mm256_mask_rolv_epi32(a, 0, a, b); @@ -52382,7 +54266,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_rolv_epi32() { + const unsafe fn test_mm256_maskz_rolv_epi32() { let a = _mm256_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1); let b = _mm256_set1_epi32(1); let r = _mm256_maskz_rolv_epi32(0, a, b); @@ 
-52393,7 +54277,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_rolv_epi32() { + const unsafe fn test_mm_rolv_epi32() { let a = _mm_set_epi32(1 << 31, 1, 1, 1); let b = _mm_set1_epi32(1); let r = _mm_rolv_epi32(a, b); @@ -52402,7 +54286,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_rolv_epi32() { + const unsafe fn test_mm_mask_rolv_epi32() { let a = _mm_set_epi32(1 << 31, 1, 1, 1); let b = _mm_set1_epi32(1); let r = _mm_mask_rolv_epi32(a, 0, a, b); @@ -52413,7 +54297,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_rolv_epi32() { + const unsafe fn test_mm_maskz_rolv_epi32() { let a = _mm_set_epi32(1 << 31, 1, 1, 1); let b = _mm_set1_epi32(1); let r = _mm_maskz_rolv_epi32(0, a, b); @@ -52424,7 +54308,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_rorv_epi32() { + const unsafe fn test_mm512_rorv_epi32() { let a = _mm512_set_epi32(1 << 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2); let b = _mm512_set1_epi32(1); let r = _mm512_rorv_epi32(a, b); @@ -52433,7 +54317,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_rorv_epi32() { + const unsafe fn test_mm512_mask_rorv_epi32() { let a = _mm512_set_epi32(1 << 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2); let b = _mm512_set1_epi32(1); let r = _mm512_mask_rorv_epi32(a, 0, a, b); @@ -52444,7 +54328,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_rorv_epi32() { + const unsafe fn test_mm512_maskz_rorv_epi32() { let a = _mm512_set_epi32(3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1 << 0); let b = _mm512_set1_epi32(1); let r = _mm512_maskz_rorv_epi32(0, a, b); @@ -52455,7 +54339,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_rorv_epi32() { + const unsafe fn test_mm256_rorv_epi32() { let a = _mm256_set_epi32(1 << 0, 2, 2, 2, 2, 2, 2, 2); let b = _mm256_set1_epi32(1); let r = _mm256_rorv_epi32(a, b); @@ -52464,7 +54348,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_rorv_epi32() { + const unsafe fn test_mm256_mask_rorv_epi32() { let a = _mm256_set_epi32(1 << 0, 2, 2, 2, 2, 2, 2, 2); let b = _mm256_set1_epi32(1); let r = _mm256_mask_rorv_epi32(a, 0, a, b); @@ -52475,7 +54359,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_rorv_epi32() { + const unsafe fn test_mm256_maskz_rorv_epi32() { let a = _mm256_set_epi32(1 << 0, 2, 2, 2, 2, 2, 2, 2); let b = _mm256_set1_epi32(1); let r = _mm256_maskz_rorv_epi32(0, a, b); @@ -52486,7 +54370,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_rorv_epi32() { + const unsafe fn test_mm_rorv_epi32() { let a = _mm_set_epi32(1 << 0, 2, 2, 2); let b = _mm_set1_epi32(1); let r = _mm_rorv_epi32(a, b); @@ -52495,7 +54379,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_rorv_epi32() { + const unsafe fn test_mm_mask_rorv_epi32() { let a = _mm_set_epi32(1 << 0, 2, 2, 2); let b = _mm_set1_epi32(1); let r = _mm_mask_rorv_epi32(a, 0, a, b); @@ -52506,7 +54390,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_rorv_epi32() { + const unsafe fn test_mm_maskz_rorv_epi32() { let a = _mm_set_epi32(1 << 0, 2, 2, 2); let b = _mm_set1_epi32(1); let r = _mm_maskz_rorv_epi32(0, a, b); @@ -52517,7 +54401,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_sllv_epi32() { + const unsafe fn 
test_mm512_sllv_epi32() { let a = _mm512_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1); let count = _mm512_set1_epi32(1); let r = _mm512_sllv_epi32(a, count); @@ -52526,7 +54410,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_sllv_epi32() { + const unsafe fn test_mm512_mask_sllv_epi32() { let a = _mm512_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1); let count = _mm512_set1_epi32(1); let r = _mm512_mask_sllv_epi32(a, 0, a, count); @@ -52537,7 +54421,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_sllv_epi32() { + const unsafe fn test_mm512_maskz_sllv_epi32() { let a = _mm512_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 << 31); let count = _mm512_set_epi32(0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1); let r = _mm512_maskz_sllv_epi32(0, a, count); @@ -52548,7 +54432,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_sllv_epi32() { + const unsafe fn test_mm256_mask_sllv_epi32() { let a = _mm256_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1); let count = _mm256_set1_epi32(1); let r = _mm256_mask_sllv_epi32(a, 0, a, count); @@ -52559,7 +54443,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_sllv_epi32() { + const unsafe fn test_mm256_maskz_sllv_epi32() { let a = _mm256_set_epi32(1 << 31, 1, 1, 1, 1, 1, 1, 1); let count = _mm256_set1_epi32(1); let r = _mm256_maskz_sllv_epi32(0, a, count); @@ -52570,7 +54454,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_sllv_epi32() { + const unsafe fn test_mm_mask_sllv_epi32() { let a = _mm_set_epi32(1 << 31, 1, 1, 1); let count = _mm_set1_epi32(1); let r = _mm_mask_sllv_epi32(a, 0, a, count); @@ -52581,7 +54465,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_sllv_epi32() { + const unsafe fn test_mm_maskz_sllv_epi32() { let a = _mm_set_epi32(1 << 31, 1, 1, 1); let count = _mm_set1_epi32(1); let r = _mm_maskz_sllv_epi32(0, a, count); @@ -52592,7 +54476,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_srlv_epi32() { + const unsafe fn test_mm512_srlv_epi32() { let a = _mm512_set_epi32(0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2); let count = _mm512_set1_epi32(1); let r = _mm512_srlv_epi32(a, count); @@ -52601,7 +54485,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_srlv_epi32() { + const unsafe fn test_mm512_mask_srlv_epi32() { let a = _mm512_set_epi32(0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2); let count = _mm512_set1_epi32(1); let r = _mm512_mask_srlv_epi32(a, 0, a, count); @@ -52612,7 +54496,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_srlv_epi32() { + const unsafe fn test_mm512_maskz_srlv_epi32() { let a = _mm512_set_epi32(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0); let count = _mm512_set_epi32(0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1); let r = _mm512_maskz_srlv_epi32(0, a, count); @@ -52623,7 +54507,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_srlv_epi32() { + const unsafe fn test_mm256_mask_srlv_epi32() { let a = _mm256_set_epi32(1 << 5, 0, 0, 0, 0, 0, 0, 0); let count = _mm256_set1_epi32(1); let r = _mm256_mask_srlv_epi32(a, 0, a, count); @@ -52634,7 +54518,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_srlv_epi32() { + const unsafe fn test_mm256_maskz_srlv_epi32() { let a = _mm256_set_epi32(1 
<< 5, 0, 0, 0, 0, 0, 0, 0); let count = _mm256_set1_epi32(1); let r = _mm256_maskz_srlv_epi32(0, a, count); @@ -52645,7 +54529,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_srlv_epi32() { + const unsafe fn test_mm_mask_srlv_epi32() { let a = _mm_set_epi32(1 << 5, 0, 0, 0); let count = _mm_set1_epi32(1); let r = _mm_mask_srlv_epi32(a, 0, a, count); @@ -52656,7 +54540,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_srlv_epi32() { + const unsafe fn test_mm_maskz_srlv_epi32() { let a = _mm_set_epi32(1 << 5, 0, 0, 0); let count = _mm_set1_epi32(1); let r = _mm_maskz_srlv_epi32(0, a, count); @@ -52940,7 +54824,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_srav_epi32() { + const unsafe fn test_mm512_srav_epi32() { let a = _mm512_set_epi32(8, -8, 16, -15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1); let count = _mm512_set_epi32(2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); let r = _mm512_srav_epi32(a, count); @@ -52949,7 +54833,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_srav_epi32() { + const unsafe fn test_mm512_mask_srav_epi32() { let a = _mm512_set_epi32(8, -8, 16, -15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16); let count = _mm512_set_epi32(2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1); let r = _mm512_mask_srav_epi32(a, 0, a, count); @@ -52960,7 +54844,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_srav_epi32() { + const unsafe fn test_mm512_maskz_srav_epi32() { let a = _mm512_set_epi32(8, -8, 16, -15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -15, -14); let count = _mm512_set_epi32(2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2); let r = _mm512_maskz_srav_epi32(0, a, count); @@ -52971,7 +54855,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_srav_epi32() { + const unsafe fn test_mm256_mask_srav_epi32() { let a = _mm256_set_epi32(1 << 5, 0, 0, 0, 0, 0, 0, 0); let count = _mm256_set1_epi32(1); let r = _mm256_mask_srav_epi32(a, 0, a, count); @@ -52982,7 +54866,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_srav_epi32() { + const unsafe fn test_mm256_maskz_srav_epi32() { let a = _mm256_set_epi32(1 << 5, 0, 0, 0, 0, 0, 0, 0); let count = _mm256_set1_epi32(1); let r = _mm256_maskz_srav_epi32(0, a, count); @@ -52993,7 +54877,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_srav_epi32() { + const unsafe fn test_mm_mask_srav_epi32() { let a = _mm_set_epi32(1 << 5, 0, 0, 0); let count = _mm_set1_epi32(1); let r = _mm_mask_srav_epi32(a, 0, a, count); @@ -53004,7 +54888,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_srav_epi32() { + const unsafe fn test_mm_maskz_srav_epi32() { let a = _mm_set_epi32(1 << 5, 0, 0, 0); let count = _mm_set1_epi32(1); let r = _mm_maskz_srav_epi32(0, a, count); @@ -53015,7 +54899,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_srai_epi32() { + const unsafe fn test_mm512_srai_epi32() { let a = _mm512_set_epi32(8, -8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, -15); let r = _mm512_srai_epi32::<2>(a); let e = _mm512_set_epi32(2, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, -4); @@ -53023,7 +54907,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_srai_epi32() { + const unsafe fn test_mm512_mask_srai_epi32() { let a = _mm512_set_epi32(8, -8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, -15); let r = 
_mm512_mask_srai_epi32::<2>(a, 0, a); assert_eq_m512i(r, a); @@ -53033,7 +54917,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_srai_epi32() { + const unsafe fn test_mm512_maskz_srai_epi32() { let a = _mm512_set_epi32(8, -8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, -15); let r = _mm512_maskz_srai_epi32::<2>(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -53043,7 +54927,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_srai_epi32() { + const unsafe fn test_mm256_mask_srai_epi32() { let a = _mm256_set_epi32(1 << 5, 0, 0, 0, 0, 0, 0, 0); let r = _mm256_mask_srai_epi32::<1>(a, 0, a); assert_eq_m256i(r, a); @@ -53053,7 +54937,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_srai_epi32() { + const unsafe fn test_mm256_maskz_srai_epi32() { let a = _mm256_set_epi32(1 << 5, 0, 0, 0, 0, 0, 0, 0); let r = _mm256_maskz_srai_epi32::<1>(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -53063,7 +54947,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_srai_epi32() { + const unsafe fn test_mm_mask_srai_epi32() { let a = _mm_set_epi32(1 << 5, 0, 0, 0); let r = _mm_mask_srai_epi32::<1>(a, 0, a); assert_eq_m128i(r, a); @@ -53073,7 +54957,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_srai_epi32() { + const unsafe fn test_mm_maskz_srai_epi32() { let a = _mm_set_epi32(1 << 5, 0, 0, 0); let r = _mm_maskz_srai_epi32::<1>(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -53083,7 +54967,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_permute_ps() { + const unsafe fn test_mm512_permute_ps() { let a = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., ); @@ -53095,7 +54979,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_permute_ps() { + const unsafe fn test_mm512_mask_permute_ps() { let a = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., ); @@ -53109,7 +54993,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_permute_ps() { + const unsafe fn test_mm512_maskz_permute_ps() { let a = _mm512_setr_ps( 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., ); @@ -53123,7 +55007,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_permute_ps() { + const unsafe fn test_mm256_mask_permute_ps() { let a = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let r = _mm256_mask_permute_ps::<0b11_11_11_11>(a, 0, a); assert_eq_m256(r, a); @@ -53133,7 +55017,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_permute_ps() { + const unsafe fn test_mm256_maskz_permute_ps() { let a = _mm256_set_ps(0., 1., 2., 3., 4., 5., 6., 7.); let r = _mm256_maskz_permute_ps::<0b11_11_11_11>(0, a); assert_eq_m256(r, _mm256_setzero_ps()); @@ -53143,7 +55027,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_permute_ps() { + const unsafe fn test_mm_mask_permute_ps() { let a = _mm_set_ps(0., 1., 2., 3.); let r = _mm_mask_permute_ps::<0b11_11_11_11>(a, 0, a); assert_eq_m128(r, a); @@ -53153,7 +55037,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_permute_ps() { + const unsafe fn test_mm_maskz_permute_ps() { let a = _mm_set_ps(0., 1., 2., 3.); let r = _mm_maskz_permute_ps::<0b11_11_11_11>(0, a); assert_eq_m128(r, _mm_setzero_ps()); @@ 
-53752,7 +55636,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_shuffle_epi32() { + const unsafe fn test_mm512_shuffle_epi32() { let a = _mm512_setr_epi32(1, 4, 5, 8, 9, 12, 13, 16, 1, 4, 5, 8, 9, 12, 13, 16); let r = _mm512_shuffle_epi32::<_MM_PERM_AADD>(a); let e = _mm512_setr_epi32(8, 8, 1, 1, 16, 16, 9, 9, 8, 8, 1, 1, 16, 16, 9, 9); @@ -53760,7 +55644,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_shuffle_epi32() { + const unsafe fn test_mm512_mask_shuffle_epi32() { let a = _mm512_setr_epi32(1, 4, 5, 8, 9, 12, 13, 16, 1, 4, 5, 8, 9, 12, 13, 16); let r = _mm512_mask_shuffle_epi32::<_MM_PERM_AADD>(a, 0, a); assert_eq_m512i(r, a); @@ -53770,7 +55654,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_shuffle_epi32() { + const unsafe fn test_mm512_maskz_shuffle_epi32() { let a = _mm512_setr_epi32(1, 4, 5, 8, 9, 12, 13, 16, 1, 4, 5, 8, 9, 12, 13, 16); let r = _mm512_maskz_shuffle_epi32::<_MM_PERM_AADD>(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -53780,7 +55664,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_shuffle_epi32() { + const unsafe fn test_mm256_mask_shuffle_epi32() { let a = _mm256_set_epi32(1, 4, 5, 8, 9, 12, 13, 16); let r = _mm256_mask_shuffle_epi32::<_MM_PERM_AADD>(a, 0, a); assert_eq_m256i(r, a); @@ -53790,7 +55674,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_shuffle_epi32() { + const unsafe fn test_mm256_maskz_shuffle_epi32() { let a = _mm256_set_epi32(1, 4, 5, 8, 9, 12, 13, 16); let r = _mm256_maskz_shuffle_epi32::<_MM_PERM_AADD>(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -53800,7 +55684,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_shuffle_epi32() { + const unsafe fn test_mm_mask_shuffle_epi32() { let a = _mm_set_epi32(1, 4, 5, 8); let r = _mm_mask_shuffle_epi32::<_MM_PERM_AADD>(a, 0, a); assert_eq_m128i(r, a); @@ -53810,7 +55694,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_shuffle_epi32() { + const unsafe fn test_mm_maskz_shuffle_epi32() { let a = _mm_set_epi32(1, 4, 5, 8); let r = _mm_maskz_shuffle_epi32::<_MM_PERM_AADD>(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -53820,7 +55704,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_shuffle_ps() { + const unsafe fn test_mm512_shuffle_ps() { let a = _mm512_setr_ps( 1., 4., 5., 8., 9., 12., 13., 16., 1., 4., 5., 8., 9., 12., 13., 16., ); @@ -53835,7 +55719,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_shuffle_ps() { + const unsafe fn test_mm512_mask_shuffle_ps() { let a = _mm512_setr_ps( 1., 4., 5., 8., 9., 12., 13., 16., 1., 4., 5., 8., 9., 12., 13., 16., ); @@ -53852,7 +55736,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_shuffle_ps() { + const unsafe fn test_mm512_maskz_shuffle_ps() { let a = _mm512_setr_ps( 1., 4., 5., 8., 9., 12., 13., 16., 1., 4., 5., 8., 9., 12., 13., 16., ); @@ -53869,7 +55753,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_shuffle_ps() { + const unsafe fn test_mm256_mask_shuffle_ps() { let a = _mm256_set_ps(1., 4., 5., 8., 9., 12., 13., 16.); let b = _mm256_set_ps(2., 3., 6., 7., 10., 11., 14., 15.); let r = _mm256_mask_shuffle_ps::<0b11_11_11_11>(a, 0, a, b); @@ -53880,7 +55764,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_shuffle_ps() { + const 
unsafe fn test_mm256_maskz_shuffle_ps() { let a = _mm256_set_ps(1., 4., 5., 8., 9., 12., 13., 16.); let b = _mm256_set_ps(2., 3., 6., 7., 10., 11., 14., 15.); let r = _mm256_maskz_shuffle_ps::<0b11_11_11_11>(0, a, b); @@ -53891,7 +55775,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_shuffle_ps() { + const unsafe fn test_mm_mask_shuffle_ps() { let a = _mm_set_ps(1., 4., 5., 8.); let b = _mm_set_ps(2., 3., 6., 7.); let r = _mm_mask_shuffle_ps::<0b11_11_11_11>(a, 0, a, b); @@ -53902,7 +55786,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_shuffle_ps() { + const unsafe fn test_mm_maskz_shuffle_ps() { let a = _mm_set_ps(1., 4., 5., 8.); let b = _mm_set_ps(2., 3., 6., 7.); let r = _mm_maskz_shuffle_ps::<0b11_11_11_11>(0, a, b); @@ -53913,7 +55797,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_shuffle_i32x4() { + const unsafe fn test_mm512_shuffle_i32x4() { let a = _mm512_setr_epi32(1, 4, 5, 8, 9, 12, 13, 16, 1, 4, 5, 8, 9, 12, 13, 16); let b = _mm512_setr_epi32(2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15); let r = _mm512_shuffle_i32x4::<0b00_00_00_00>(a, b); @@ -53922,7 +55806,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_shuffle_i32x4() { + const unsafe fn test_mm512_mask_shuffle_i32x4() { let a = _mm512_setr_epi32(1, 4, 5, 8, 9, 12, 13, 16, 1, 4, 5, 8, 9, 12, 13, 16); let b = _mm512_setr_epi32(2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15); let r = _mm512_mask_shuffle_i32x4::<0b00_00_00_00>(a, 0, a, b); @@ -53933,7 +55817,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_shuffle_i32x4() { + const unsafe fn test_mm512_maskz_shuffle_i32x4() { let a = _mm512_setr_epi32(1, 4, 5, 8, 9, 12, 13, 16, 1, 4, 5, 8, 9, 12, 13, 16); let b = _mm512_setr_epi32(2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15); let r = _mm512_maskz_shuffle_i32x4::<0b00_00_00_00>(0, a, b); @@ -53944,7 +55828,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_shuffle_i32x4() { + const unsafe fn test_mm256_shuffle_i32x4() { let a = _mm256_set_epi32(1, 4, 5, 8, 9, 12, 13, 16); let b = _mm256_set_epi32(2, 3, 6, 7, 10, 11, 14, 15); let r = _mm256_shuffle_i32x4::<0b00>(a, b); @@ -53953,7 +55837,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_shuffle_i32x4() { + const unsafe fn test_mm256_mask_shuffle_i32x4() { let a = _mm256_set_epi32(1, 4, 5, 8, 9, 12, 13, 16); let b = _mm256_set_epi32(2, 3, 6, 7, 10, 11, 14, 15); let r = _mm256_mask_shuffle_i32x4::<0b00>(a, 0, a, b); @@ -53964,7 +55848,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_shuffle_i32x4() { + const unsafe fn test_mm256_maskz_shuffle_i32x4() { let a = _mm256_set_epi32(1, 4, 5, 8, 9, 12, 13, 16); let b = _mm256_set_epi32(2, 3, 6, 7, 10, 11, 14, 15); let r = _mm256_maskz_shuffle_i32x4::<0b00>(0, a, b); @@ -53975,7 +55859,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_shuffle_f32x4() { + const unsafe fn test_mm512_shuffle_f32x4() { let a = _mm512_setr_ps( 1., 4., 5., 8., 9., 12., 13., 16., 1., 4., 5., 8., 9., 12., 13., 16., ); @@ -53990,7 +55874,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_shuffle_f32x4() { + const unsafe fn test_mm512_mask_shuffle_f32x4() { let a = _mm512_setr_ps( 1., 4., 5., 8., 9., 12., 13., 16., 1., 4., 5., 8., 9., 12., 13., 16., ); @@ -54007,7 +55891,7 @@ mod tests { } 
#[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_shuffle_f32x4() { + const unsafe fn test_mm512_maskz_shuffle_f32x4() { let a = _mm512_setr_ps( 1., 4., 5., 8., 9., 12., 13., 16., 1., 4., 5., 8., 9., 12., 13., 16., ); @@ -54024,7 +55908,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_shuffle_f32x4() { + const unsafe fn test_mm256_shuffle_f32x4() { let a = _mm256_set_ps(1., 4., 5., 8., 9., 12., 13., 16.); let b = _mm256_set_ps(2., 3., 6., 7., 10., 11., 14., 15.); let r = _mm256_shuffle_f32x4::<0b00>(a, b); @@ -54033,7 +55917,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_shuffle_f32x4() { + const unsafe fn test_mm256_mask_shuffle_f32x4() { let a = _mm256_set_ps(1., 4., 5., 8., 9., 12., 13., 16.); let b = _mm256_set_ps(2., 3., 6., 7., 10., 11., 14., 15.); let r = _mm256_mask_shuffle_f32x4::<0b00>(a, 0, a, b); @@ -54044,7 +55928,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_shuffle_f32x4() { + const unsafe fn test_mm256_maskz_shuffle_f32x4() { let a = _mm256_set_ps(1., 4., 5., 8., 9., 12., 13., 16.); let b = _mm256_set_ps(2., 3., 6., 7., 10., 11., 14., 15.); let r = _mm256_maskz_shuffle_f32x4::<0b00>(0, a, b); @@ -54055,7 +55939,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_extractf32x4_ps() { + const unsafe fn test_mm512_extractf32x4_ps() { let a = _mm512_setr_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -54065,7 +55949,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_extractf32x4_ps() { + const unsafe fn test_mm512_mask_extractf32x4_ps() { let a = _mm512_setr_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -54078,7 +55962,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_extractf32x4_ps() { + const unsafe fn test_mm512_maskz_extractf32x4_ps() { let a = _mm512_setr_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -54090,7 +55974,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_extractf32x4_ps() { + const unsafe fn test_mm256_extractf32x4_ps() { let a = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_extractf32x4_ps::<1>(a); let e = _mm_set_ps(1., 2., 3., 4.); @@ -54098,7 +55982,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_extractf32x4_ps() { + const unsafe fn test_mm256_mask_extractf32x4_ps() { let a = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); let src = _mm_set1_ps(100.); let r = _mm256_mask_extractf32x4_ps::<1>(src, 0, a); @@ -54109,7 +55993,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_extractf32x4_ps() { + const unsafe fn test_mm256_maskz_extractf32x4_ps() { let a = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_maskz_extractf32x4_ps::<1>(0, a); assert_eq_m128(r, _mm_setzero_ps()); @@ -54119,7 +56003,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_extracti32x4_epi32() { + const unsafe fn test_mm512_extracti32x4_epi32() { let a = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let r = _mm512_extracti32x4_epi32::<1>(a); let e = _mm_setr_epi32(5, 6, 7, 8); @@ -54127,7 +56011,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_extracti32x4_epi32() { + const unsafe fn test_mm512_mask_extracti32x4_epi32() { let a = _mm512_setr_epi32(1, 2, 3, 
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let src = _mm_set1_epi32(100); let r = _mm512_mask_extracti32x4_epi32::<1>(src, 0, a); @@ -54138,7 +56022,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm512_maskz_extracti32x4_epi32() { + const unsafe fn test_mm512_maskz_extracti32x4_epi32() { let a = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let r = _mm512_maskz_extracti32x4_epi32::<1>(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -54148,7 +56032,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_extracti32x4_epi32() { + const unsafe fn test_mm256_extracti32x4_epi32() { let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm256_extracti32x4_epi32::<1>(a); let e = _mm_set_epi32(1, 2, 3, 4); @@ -54156,7 +56040,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_extracti32x4_epi32() { + const unsafe fn test_mm256_mask_extracti32x4_epi32() { let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let src = _mm_set1_epi32(100); let r = _mm256_mask_extracti32x4_epi32::<1>(src, 0, a); @@ -54167,7 +56051,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_extracti32x4_epi32() { + const unsafe fn test_mm256_maskz_extracti32x4_epi32() { let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm256_maskz_extracti32x4_epi32::<1>(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -54177,7 +56061,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_moveldup_ps() { + const unsafe fn test_mm512_moveldup_ps() { let a = _mm512_setr_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -54189,7 +56073,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_moveldup_ps() { + const unsafe fn test_mm512_mask_moveldup_ps() { let a = _mm512_setr_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -54203,7 +56087,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_moveldup_ps() { + const unsafe fn test_mm512_maskz_moveldup_ps() { let a = _mm512_setr_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -54217,7 +56101,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_moveldup_ps() { + const unsafe fn test_mm256_mask_moveldup_ps() { let a = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_mask_moveldup_ps(a, 0, a); assert_eq_m256(r, a); @@ -54227,7 +56111,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_moveldup_ps() { + const unsafe fn test_mm256_maskz_moveldup_ps() { let a = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_maskz_moveldup_ps(0, a); assert_eq_m256(r, _mm256_setzero_ps()); @@ -54237,7 +56121,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_moveldup_ps() { + const unsafe fn test_mm_mask_moveldup_ps() { let a = _mm_set_ps(1., 2., 3., 4.); let r = _mm_mask_moveldup_ps(a, 0, a); assert_eq_m128(r, a); @@ -54247,7 +56131,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_moveldup_ps() { + const unsafe fn test_mm_maskz_moveldup_ps() { let a = _mm_set_ps(1., 2., 3., 4.); let r = _mm_maskz_moveldup_ps(0, a); assert_eq_m128(r, _mm_setzero_ps()); @@ -54257,7 +56141,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_movehdup_ps() { + const unsafe fn test_mm512_movehdup_ps() { let a 
= _mm512_setr_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -54269,7 +56153,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_movehdup_ps() { + const unsafe fn test_mm512_mask_movehdup_ps() { let a = _mm512_setr_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -54283,7 +56167,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_movehdup_ps() { + const unsafe fn test_mm512_maskz_movehdup_ps() { let a = _mm512_setr_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -54297,7 +56181,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_movehdup_ps() { + const unsafe fn test_mm256_mask_movehdup_ps() { let a = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_mask_movehdup_ps(a, 0, a); assert_eq_m256(r, a); @@ -54307,7 +56191,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_movehdup_ps() { + const unsafe fn test_mm256_maskz_movehdup_ps() { let a = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_maskz_movehdup_ps(0, a); assert_eq_m256(r, _mm256_setzero_ps()); @@ -54317,7 +56201,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_movehdup_ps() { + const unsafe fn test_mm_mask_movehdup_ps() { let a = _mm_set_ps(1., 2., 3., 4.); let r = _mm_mask_movehdup_ps(a, 0, a); assert_eq_m128(r, a); @@ -54327,7 +56211,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_movehdup_ps() { + const unsafe fn test_mm_maskz_movehdup_ps() { let a = _mm_set_ps(1., 2., 3., 4.); let r = _mm_maskz_movehdup_ps(0, a); assert_eq_m128(r, _mm_setzero_ps()); @@ -54337,7 +56221,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_inserti32x4() { + const unsafe fn test_mm512_inserti32x4() { let a = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm_setr_epi32(17, 18, 19, 20); let r = _mm512_inserti32x4::<0>(a, b); @@ -54346,7 +56230,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_inserti32x4() { + const unsafe fn test_mm512_mask_inserti32x4() { let a = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm_setr_epi32(17, 18, 19, 20); let r = _mm512_mask_inserti32x4::<0>(a, 0, a, b); @@ -54357,7 +56241,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_inserti32x4() { + const unsafe fn test_mm512_maskz_inserti32x4() { let a = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm_setr_epi32(17, 18, 19, 20); let r = _mm512_maskz_inserti32x4::<0>(0, a, b); @@ -54368,7 +56252,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_inserti32x4() { + const unsafe fn test_mm256_inserti32x4() { let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm_set_epi32(17, 18, 19, 20); let r = _mm256_inserti32x4::<1>(a, b); @@ -54377,7 +56261,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_inserti32x4() { + const unsafe fn test_mm256_mask_inserti32x4() { let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm_set_epi32(17, 18, 19, 20); let r = _mm256_mask_inserti32x4::<0>(a, 0, a, b); @@ -54388,7 +56272,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_inserti32x4() { + const unsafe fn test_mm256_maskz_inserti32x4() { let a = 
_mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm_set_epi32(17, 18, 19, 20); let r = _mm256_maskz_inserti32x4::<0>(0, a, b); @@ -54399,7 +56283,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_insertf32x4() { + const unsafe fn test_mm512_insertf32x4() { let a = _mm512_setr_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -54412,7 +56296,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_insertf32x4() { + const unsafe fn test_mm512_mask_insertf32x4() { let a = _mm512_setr_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -54427,7 +56311,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_insertf32x4() { + const unsafe fn test_mm512_maskz_insertf32x4() { let a = _mm512_setr_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -54442,7 +56326,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_insertf32x4() { + const unsafe fn test_mm256_insertf32x4() { let a = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm_set_ps(17., 18., 19., 20.); let r = _mm256_insertf32x4::<1>(a, b); @@ -54451,7 +56335,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_insertf32x4() { + const unsafe fn test_mm256_mask_insertf32x4() { let a = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm_set_ps(17., 18., 19., 20.); let r = _mm256_mask_insertf32x4::<0>(a, 0, a, b); @@ -54462,7 +56346,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_insertf32x4() { + const unsafe fn test_mm256_maskz_insertf32x4() { let a = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm_set_ps(17., 18., 19., 20.); let r = _mm256_maskz_insertf32x4::<0>(0, a, b); @@ -54473,21 +56357,21 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_castps128_ps512() { + const unsafe fn test_mm512_castps128_ps512() { let a = _mm_setr_ps(17., 18., 19., 20.); let r = _mm512_castps128_ps512(a); assert_eq_m128(_mm512_castps512_ps128(r), a); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_castps256_ps512() { + const unsafe fn test_mm512_castps256_ps512() { let a = _mm256_setr_ps(17., 18., 19., 20., 21., 22., 23., 24.); let r = _mm512_castps256_ps512(a); assert_eq_m256(_mm512_castps512_ps256(r), a); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_zextps128_ps512() { + const unsafe fn test_mm512_zextps128_ps512() { let a = _mm_setr_ps(17., 18., 19., 20.); let r = _mm512_zextps128_ps512(a); let e = _mm512_setr_ps( @@ -54497,7 +56381,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_zextps256_ps512() { + const unsafe fn test_mm512_zextps256_ps512() { let a = _mm256_setr_ps(17., 18., 19., 20., 21., 22., 23., 24.); let r = _mm512_zextps256_ps512(a); let e = _mm512_setr_ps( @@ -54507,7 +56391,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_castps512_ps128() { + const unsafe fn test_mm512_castps512_ps128() { let a = _mm512_setr_ps( 17., 18., 19., 20., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., ); @@ -54517,7 +56401,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_castps512_ps256() { + const unsafe fn test_mm512_castps512_ps256() { let a = _mm512_setr_ps( 17., 18., 19., 20., 21., 22., 23., 24., -1., -1., -1., -1., -1., -1., -1., -1., ); @@ -54527,7 +56411,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn 
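A minimal sketch of what the const-qualified extract/insert tests above exercise, assuming a nightly toolchain with the `stdarch_const_x86` gate used elsewhere in this patch and that the `_mm512_setr_epi32` / `_mm_setr_epi32` constructors are const-qualified by it as well:

    #![feature(stdarch_const_x86)]
    use core::arch::x86_64::*;

    // Evaluated entirely at compile time once the intrinsics are `const fn`.
    const LANE1: __m128i = unsafe {
        let a = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
        // imm8 = 1 selects elements 4..8, i.e. 5, 6, 7, 8
        _mm512_extracti32x4_epi32::<1>(a)
    };

    const WITH_B: __m512i = unsafe {
        let a = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
        let b = _mm_setr_epi32(17, 18, 19, 20);
        // imm8 = 0 replaces the lowest 128-bit lane of `a` with `b`
        _mm512_inserti32x4::<0>(a, b)
    };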
test_mm512_castps_pd() { + const unsafe fn test_mm512_castps_pd() { let a = _mm512_set1_ps(1.); let r = _mm512_castps_pd(a); let e = _mm512_set1_pd(0.007812501848093234); @@ -54535,7 +56419,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_castps_si512() { + const unsafe fn test_mm512_castps_si512() { let a = _mm512_set1_ps(1.); let r = _mm512_castps_si512(a); let e = _mm512_set1_epi32(1065353216); @@ -54543,7 +56427,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_broadcastd_epi32() { + const unsafe fn test_mm512_broadcastd_epi32() { let a = _mm_set_epi32(17, 18, 19, 20); let r = _mm512_broadcastd_epi32(a); let e = _mm512_set1_epi32(20); @@ -54551,7 +56435,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_broadcastd_epi32() { + const unsafe fn test_mm512_mask_broadcastd_epi32() { let src = _mm512_set1_epi32(20); let a = _mm_set_epi32(17, 18, 19, 20); let r = _mm512_mask_broadcastd_epi32(src, 0, a); @@ -54562,7 +56446,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_broadcastd_epi32() { + const unsafe fn test_mm512_maskz_broadcastd_epi32() { let a = _mm_set_epi32(17, 18, 19, 20); let r = _mm512_maskz_broadcastd_epi32(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -54572,7 +56456,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_broadcastd_epi32() { + const unsafe fn test_mm256_mask_broadcastd_epi32() { let src = _mm256_set1_epi32(20); let a = _mm_set_epi32(17, 18, 19, 20); let r = _mm256_mask_broadcastd_epi32(src, 0, a); @@ -54583,7 +56467,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_broadcastd_epi32() { + const unsafe fn test_mm256_maskz_broadcastd_epi32() { let a = _mm_set_epi32(17, 18, 19, 20); let r = _mm256_maskz_broadcastd_epi32(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -54593,7 +56477,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_broadcastd_epi32() { + const unsafe fn test_mm_mask_broadcastd_epi32() { let src = _mm_set1_epi32(20); let a = _mm_set_epi32(17, 18, 19, 20); let r = _mm_mask_broadcastd_epi32(src, 0, a); @@ -54604,7 +56488,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_broadcastd_epi32() { + const unsafe fn test_mm_maskz_broadcastd_epi32() { let a = _mm_set_epi32(17, 18, 19, 20); let r = _mm_maskz_broadcastd_epi32(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -54614,7 +56498,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_broadcastss_ps() { + const unsafe fn test_mm512_broadcastss_ps() { let a = _mm_set_ps(17., 18., 19., 20.); let r = _mm512_broadcastss_ps(a); let e = _mm512_set1_ps(20.); @@ -54622,7 +56506,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_broadcastss_ps() { + const unsafe fn test_mm512_mask_broadcastss_ps() { let src = _mm512_set1_ps(20.); let a = _mm_set_ps(17., 18., 19., 20.); let r = _mm512_mask_broadcastss_ps(src, 0, a); @@ -54633,7 +56517,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_broadcastss_ps() { + const unsafe fn test_mm512_maskz_broadcastss_ps() { let a = _mm_set_ps(17., 18., 19., 20.); let r = _mm512_maskz_broadcastss_ps(0, a); assert_eq_m512(r, _mm512_setzero_ps()); @@ -54645,7 +56529,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_broadcastss_ps() { + const unsafe fn test_mm256_mask_broadcastss_ps() { let src = 
_mm256_set1_ps(20.); let a = _mm_set_ps(17., 18., 19., 20.); let r = _mm256_mask_broadcastss_ps(src, 0, a); @@ -54656,7 +56540,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_broadcastss_ps() { + const unsafe fn test_mm256_maskz_broadcastss_ps() { let a = _mm_set_ps(17., 18., 19., 20.); let r = _mm256_maskz_broadcastss_ps(0, a); assert_eq_m256(r, _mm256_setzero_ps()); @@ -54666,7 +56550,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_broadcastss_ps() { + const unsafe fn test_mm_mask_broadcastss_ps() { let src = _mm_set1_ps(20.); let a = _mm_set_ps(17., 18., 19., 20.); let r = _mm_mask_broadcastss_ps(src, 0, a); @@ -54677,7 +56561,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_broadcastss_ps() { + const unsafe fn test_mm_maskz_broadcastss_ps() { let a = _mm_set_ps(17., 18., 19., 20.); let r = _mm_maskz_broadcastss_ps(0, a); assert_eq_m128(r, _mm_setzero_ps()); @@ -54687,7 +56571,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_broadcast_i32x4() { + const unsafe fn test_mm512_broadcast_i32x4() { let a = _mm_set_epi32(17, 18, 19, 20); let r = _mm512_broadcast_i32x4(a); let e = _mm512_set_epi32( @@ -54697,7 +56581,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_broadcast_i32x4() { + const unsafe fn test_mm512_mask_broadcast_i32x4() { let src = _mm512_set1_epi32(20); let a = _mm_set_epi32(17, 18, 19, 20); let r = _mm512_mask_broadcast_i32x4(src, 0, a); @@ -54710,7 +56594,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_broadcast_i32x4() { + const unsafe fn test_mm512_maskz_broadcast_i32x4() { let a = _mm_set_epi32(17, 18, 19, 20); let r = _mm512_maskz_broadcast_i32x4(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -54720,7 +56604,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_broadcast_i32x4() { + const unsafe fn test_mm256_broadcast_i32x4() { let a = _mm_set_epi32(17, 18, 19, 20); let r = _mm256_broadcast_i32x4(a); let e = _mm256_set_epi32(17, 18, 19, 20, 17, 18, 19, 20); @@ -54728,7 +56612,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_broadcast_i32x4() { + const unsafe fn test_mm256_mask_broadcast_i32x4() { let src = _mm256_set1_epi32(20); let a = _mm_set_epi32(17, 18, 19, 20); let r = _mm256_mask_broadcast_i32x4(src, 0, a); @@ -54739,7 +56623,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_broadcast_i32x4() { + const unsafe fn test_mm256_maskz_broadcast_i32x4() { let a = _mm_set_epi32(17, 18, 19, 20); let r = _mm256_maskz_broadcast_i32x4(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -54749,7 +56633,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_broadcast_f32x4() { + const unsafe fn test_mm512_broadcast_f32x4() { let a = _mm_set_ps(17., 18., 19., 20.); let r = _mm512_broadcast_f32x4(a); let e = _mm512_set_ps( @@ -54759,7 +56643,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_broadcast_f32x4() { + const unsafe fn test_mm512_mask_broadcast_f32x4() { let src = _mm512_set1_ps(20.); let a = _mm_set_ps(17., 18., 19., 20.); let r = _mm512_mask_broadcast_f32x4(src, 0, a); @@ -54772,7 +56656,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_broadcast_f32x4() { + const unsafe fn test_mm512_maskz_broadcast_f32x4() { let a = _mm_set_ps(17., 18., 19., 20.); let r = 
_mm512_maskz_broadcast_f32x4(0, a); assert_eq_m512(r, _mm512_setzero_ps()); @@ -54784,7 +56668,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_broadcast_f32x4() { + const unsafe fn test_mm256_broadcast_f32x4() { let a = _mm_set_ps(17., 18., 19., 20.); let r = _mm256_broadcast_f32x4(a); let e = _mm256_set_ps(17., 18., 19., 20., 17., 18., 19., 20.); @@ -54792,7 +56676,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_broadcast_f32x4() { + const unsafe fn test_mm256_mask_broadcast_f32x4() { let src = _mm256_set1_ps(20.); let a = _mm_set_ps(17., 18., 19., 20.); let r = _mm256_mask_broadcast_f32x4(src, 0, a); @@ -54803,7 +56687,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_broadcast_f32x4() { + const unsafe fn test_mm256_maskz_broadcast_f32x4() { let a = _mm_set_ps(17., 18., 19., 20.); let r = _mm256_maskz_broadcast_f32x4(0, a); assert_eq_m256(r, _mm256_setzero_ps()); @@ -54813,7 +56697,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_blend_epi32() { + const unsafe fn test_mm512_mask_blend_epi32() { let a = _mm512_set1_epi32(1); let b = _mm512_set1_epi32(2); let r = _mm512_mask_blend_epi32(0b11111111_00000000, a, b); @@ -54822,7 +56706,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_blend_epi32() { + const unsafe fn test_mm256_mask_blend_epi32() { let a = _mm256_set1_epi32(1); let b = _mm256_set1_epi32(2); let r = _mm256_mask_blend_epi32(0b11111111, a, b); @@ -54831,7 +56715,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_blend_epi32() { + const unsafe fn test_mm_mask_blend_epi32() { let a = _mm_set1_epi32(1); let b = _mm_set1_epi32(2); let r = _mm_mask_blend_epi32(0b00001111, a, b); @@ -54840,7 +56724,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_blend_ps() { + const unsafe fn test_mm512_mask_blend_ps() { let a = _mm512_set1_ps(1.); let b = _mm512_set1_ps(2.); let r = _mm512_mask_blend_ps(0b11111111_00000000, a, b); @@ -54851,7 +56735,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_blend_ps() { + const unsafe fn test_mm256_mask_blend_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set1_ps(2.); let r = _mm256_mask_blend_ps(0b11111111, a, b); @@ -54860,7 +56744,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_blend_ps() { + const unsafe fn test_mm_mask_blend_ps() { let a = _mm_set1_ps(1.); let b = _mm_set1_ps(2.); let r = _mm_mask_blend_ps(0b00001111, a, b); @@ -54869,7 +56753,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_unpackhi_epi32() { + const unsafe fn test_mm512_unpackhi_epi32() { let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm512_set_epi32( 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -54880,7 +56764,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_unpackhi_epi32() { + const unsafe fn test_mm512_mask_unpackhi_epi32() { let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm512_set_epi32( 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -54893,7 +56777,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_unpackhi_epi32() { + const unsafe fn test_mm512_maskz_unpackhi_epi32() { let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 
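A similarly hedged sketch for the blend conversions, assuming the same `stdarch_const_x86` gate and a const-qualified `_mm512_set1_epi32`; mask bit i selects lane i from the second operand:

    #![feature(stdarch_const_x86)]
    use core::arch::x86_64::*;

    // Lanes 0..=7 come from the first operand (1), lanes 8..=15 from the second (2).
    const BLENDED: __m512i = unsafe {
        _mm512_mask_blend_epi32(0b11111111_00000000, _mm512_set1_epi32(1), _mm512_set1_epi32(2))
    };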
15, 16); let b = _mm512_set_epi32( 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -54906,7 +56790,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_unpackhi_epi32() { + const unsafe fn test_mm256_mask_unpackhi_epi32() { let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm256_set_epi32(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm256_mask_unpackhi_epi32(a, 0, a, b); @@ -54917,7 +56801,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_unpackhi_epi32() { + const unsafe fn test_mm256_maskz_unpackhi_epi32() { let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm256_set_epi32(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm256_maskz_unpackhi_epi32(0, a, b); @@ -54928,7 +56812,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_unpackhi_epi32() { + const unsafe fn test_mm_mask_unpackhi_epi32() { let a = _mm_set_epi32(1, 2, 3, 4); let b = _mm_set_epi32(17, 18, 19, 20); let r = _mm_mask_unpackhi_epi32(a, 0, a, b); @@ -54939,7 +56823,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_unpackhi_epi32() { + const unsafe fn test_mm_maskz_unpackhi_epi32() { let a = _mm_set_epi32(1, 2, 3, 4); let b = _mm_set_epi32(17, 18, 19, 20); let r = _mm_maskz_unpackhi_epi32(0, a, b); @@ -54950,7 +56834,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_unpackhi_ps() { + const unsafe fn test_mm512_unpackhi_ps() { let a = _mm512_set_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -54965,7 +56849,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_unpackhi_ps() { + const unsafe fn test_mm512_mask_unpackhi_ps() { let a = _mm512_set_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -54982,7 +56866,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_unpackhi_ps() { + const unsafe fn test_mm512_maskz_unpackhi_ps() { let a = _mm512_set_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -54999,7 +56883,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_unpackhi_ps() { + const unsafe fn test_mm256_mask_unpackhi_ps() { let a = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm256_set_ps(17., 18., 19., 20., 21., 22., 23., 24.); let r = _mm256_mask_unpackhi_ps(a, 0, a, b); @@ -55010,7 +56894,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_unpackhi_ps() { + const unsafe fn test_mm256_maskz_unpackhi_ps() { let a = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm256_set_ps(17., 18., 19., 20., 21., 22., 23., 24.); let r = _mm256_maskz_unpackhi_ps(0, a, b); @@ -55021,7 +56905,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_unpackhi_ps() { + const unsafe fn test_mm_mask_unpackhi_ps() { let a = _mm_set_ps(1., 2., 3., 4.); let b = _mm_set_ps(17., 18., 19., 20.); let r = _mm_mask_unpackhi_ps(a, 0, a, b); @@ -55032,7 +56916,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_unpackhi_ps() { + const unsafe fn test_mm_maskz_unpackhi_ps() { let a = _mm_set_ps(1., 2., 3., 4.); let b = _mm_set_ps(17., 18., 19., 20.); let r = _mm_maskz_unpackhi_ps(0, a, b); @@ -55043,7 +56927,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_unpacklo_epi32() { + const unsafe fn test_mm512_unpacklo_epi32() { let a = 
_mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm512_set_epi32( 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -55054,7 +56938,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_unpacklo_epi32() { + const unsafe fn test_mm512_mask_unpacklo_epi32() { let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm512_set_epi32( 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -55067,7 +56951,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_unpacklo_epi32() { + const unsafe fn test_mm512_maskz_unpacklo_epi32() { let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let b = _mm512_set_epi32( 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -55080,7 +56964,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_unpacklo_epi32() { + const unsafe fn test_mm256_mask_unpacklo_epi32() { let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm256_set_epi32(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm256_mask_unpacklo_epi32(a, 0, a, b); @@ -55091,7 +56975,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_unpacklo_epi32() { + const unsafe fn test_mm256_maskz_unpacklo_epi32() { let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm256_set_epi32(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm256_maskz_unpacklo_epi32(0, a, b); @@ -55102,7 +56986,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_unpacklo_epi32() { + const unsafe fn test_mm_mask_unpacklo_epi32() { let a = _mm_set_epi32(1, 2, 3, 4); let b = _mm_set_epi32(17, 18, 19, 20); let r = _mm_mask_unpacklo_epi32(a, 0, a, b); @@ -55113,7 +56997,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_unpacklo_epi32() { + const unsafe fn test_mm_maskz_unpacklo_epi32() { let a = _mm_set_epi32(1, 2, 3, 4); let b = _mm_set_epi32(17, 18, 19, 20); let r = _mm_maskz_unpacklo_epi32(0, a, b); @@ -55124,7 +57008,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_unpacklo_ps() { + const unsafe fn test_mm512_unpacklo_ps() { let a = _mm512_set_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -55139,7 +57023,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_unpacklo_ps() { + const unsafe fn test_mm512_mask_unpacklo_ps() { let a = _mm512_set_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -55156,7 +57040,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_unpacklo_ps() { + const unsafe fn test_mm512_maskz_unpacklo_ps() { let a = _mm512_set_ps( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -55173,7 +57057,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_unpacklo_ps() { + const unsafe fn test_mm256_mask_unpacklo_ps() { let a = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm256_set_ps(17., 18., 19., 20., 21., 22., 23., 24.); let r = _mm256_mask_unpacklo_ps(a, 0, a, b); @@ -55184,7 +57068,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_unpacklo_ps() { + const unsafe fn test_mm256_maskz_unpacklo_ps() { let a = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm256_set_ps(17., 18., 19., 20., 21., 22., 23., 24.); let r = _mm256_maskz_unpacklo_ps(0, a, 
b); @@ -55195,7 +57079,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_unpacklo_ps() { + const unsafe fn test_mm_mask_unpacklo_ps() { let a = _mm_set_ps(1., 2., 3., 4.); let b = _mm_set_ps(17., 18., 19., 20.); let r = _mm_mask_unpacklo_ps(a, 0, a, b); @@ -55206,7 +57090,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_unpacklo_ps() { + const unsafe fn test_mm_maskz_unpacklo_ps() { let a = _mm_set_ps(1., 2., 3., 4.); let b = _mm_set_ps(17., 18., 19., 20.); let r = _mm_maskz_unpacklo_ps(0, a, b); @@ -55217,7 +57101,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_alignr_epi32() { + const unsafe fn test_mm512_alignr_epi32() { let a = _mm512_set_epi32(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1); let b = _mm512_set_epi32( 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, @@ -55234,7 +57118,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_alignr_epi32() { + const unsafe fn test_mm512_mask_alignr_epi32() { let a = _mm512_set_epi32(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1); let b = _mm512_set_epi32( 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, @@ -55249,7 +57133,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_alignr_epi32() { + const unsafe fn test_mm512_maskz_alignr_epi32() { let a = _mm512_set_epi32(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1); let b = _mm512_set_epi32( 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, @@ -55262,7 +57146,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_alignr_epi32() { + const unsafe fn test_mm256_alignr_epi32() { let a = _mm256_set_epi32(8, 7, 6, 5, 4, 3, 2, 1); let b = _mm256_set_epi32(16, 15, 14, 13, 12, 11, 10, 9); let r = _mm256_alignr_epi32::<0>(a, b); @@ -55273,7 +57157,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_alignr_epi32() { + const unsafe fn test_mm256_mask_alignr_epi32() { let a = _mm256_set_epi32(8, 7, 6, 5, 4, 3, 2, 1); let b = _mm256_set_epi32(16, 15, 14, 13, 12, 11, 10, 9); let r = _mm256_mask_alignr_epi32::<1>(a, 0, a, b); @@ -55284,7 +57168,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_alignr_epi32() { + const unsafe fn test_mm256_maskz_alignr_epi32() { let a = _mm256_set_epi32(8, 7, 6, 5, 4, 3, 2, 1); let b = _mm256_set_epi32(16, 15, 14, 13, 12, 11, 10, 9); let r = _mm256_maskz_alignr_epi32::<1>(0, a, b); @@ -55295,7 +57179,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_alignr_epi32() { + const unsafe fn test_mm_alignr_epi32() { let a = _mm_set_epi32(4, 3, 2, 1); let b = _mm_set_epi32(8, 7, 6, 5); let r = _mm_alignr_epi32::<0>(a, b); @@ -55306,7 +57190,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_alignr_epi32() { + const unsafe fn test_mm_mask_alignr_epi32() { let a = _mm_set_epi32(4, 3, 2, 1); let b = _mm_set_epi32(8, 7, 6, 5); let r = _mm_mask_alignr_epi32::<1>(a, 0, a, b); @@ -55317,7 +57201,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_alignr_epi32() { + const unsafe fn test_mm_maskz_alignr_epi32() { let a = _mm_set_epi32(4, 3, 2, 1); let b = _mm_set_epi32(8, 7, 6, 5); let r = _mm_maskz_alignr_epi32::<1>(0, a, b); @@ -55328,7 +57212,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_and_epi32() { + const unsafe fn test_mm512_and_epi32() 
{ #[rustfmt::skip] let a = _mm512_set_epi32( 1 << 1 | 1 << 2, 0, 0, 0, @@ -55349,7 +57233,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_and_epi32() { + const unsafe fn test_mm512_mask_and_epi32() { #[rustfmt::skip] let a = _mm512_set_epi32( 1 << 1 | 1 << 2, 0, 0, 0, @@ -55378,7 +57262,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_and_epi32() { + const unsafe fn test_mm512_maskz_and_epi32() { #[rustfmt::skip] let a = _mm512_set_epi32( 1 << 1 | 1 << 2, 0, 0, 0, @@ -55401,7 +57285,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_and_epi32() { + const unsafe fn test_mm256_mask_and_epi32() { let a = _mm256_set1_epi32(1 << 1 | 1 << 2); let b = _mm256_set1_epi32(1 << 1); let r = _mm256_mask_and_epi32(a, 0, a, b); @@ -55412,7 +57296,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_and_epi32() { + const unsafe fn test_mm256_maskz_and_epi32() { let a = _mm256_set1_epi32(1 << 1 | 1 << 2); let b = _mm256_set1_epi32(1 << 1); let r = _mm256_maskz_and_epi32(0, a, b); @@ -55423,7 +57307,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_and_epi32() { + const unsafe fn test_mm_mask_and_epi32() { let a = _mm_set1_epi32(1 << 1 | 1 << 2); let b = _mm_set1_epi32(1 << 1); let r = _mm_mask_and_epi32(a, 0, a, b); @@ -55434,7 +57318,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_and_epi32() { + const unsafe fn test_mm_maskz_and_epi32() { let a = _mm_set1_epi32(1 << 1 | 1 << 2); let b = _mm_set1_epi32(1 << 1); let r = _mm_maskz_and_epi32(0, a, b); @@ -55445,7 +57329,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_and_si512() { + const unsafe fn test_mm512_and_si512() { #[rustfmt::skip] let a = _mm512_set_epi32( 1 << 1 | 1 << 2, 0, 0, 0, @@ -55466,7 +57350,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_or_epi32() { + const unsafe fn test_mm512_or_epi32() { #[rustfmt::skip] let a = _mm512_set_epi32( 1 << 1 | 1 << 2, 0, 0, 0, @@ -55493,7 +57377,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_or_epi32() { + const unsafe fn test_mm512_mask_or_epi32() { #[rustfmt::skip] let a = _mm512_set_epi32( 1 << 1 | 1 << 2, 0, 0, 0, @@ -55522,7 +57406,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_or_epi32() { + const unsafe fn test_mm512_maskz_or_epi32() { #[rustfmt::skip] let a = _mm512_set_epi32( 1 << 1 | 1 << 2, 0, 0, 0, @@ -55551,7 +57435,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_or_epi32() { + const unsafe fn test_mm256_or_epi32() { let a = _mm256_set1_epi32(1 << 1 | 1 << 2); let b = _mm256_set1_epi32(1 << 1); let r = _mm256_or_epi32(a, b); @@ -55560,7 +57444,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_or_epi32() { + const unsafe fn test_mm256_mask_or_epi32() { let a = _mm256_set1_epi32(1 << 1 | 1 << 2); let b = _mm256_set1_epi32(1 << 1); let r = _mm256_mask_or_epi32(a, 0, a, b); @@ -55571,7 +57455,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_or_epi32() { + const unsafe fn test_mm256_maskz_or_epi32() { let a = _mm256_set1_epi32(1 << 1 | 1 << 2); let b = _mm256_set1_epi32(1 << 1); let r = _mm256_maskz_or_epi32(0, a, b); @@ -55582,7 +57466,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_or_epi32() { + const unsafe fn 
test_mm_or_epi32() { let a = _mm_set1_epi32(1 << 1 | 1 << 2); let b = _mm_set1_epi32(1 << 1); let r = _mm_or_epi32(a, b); @@ -55591,7 +57475,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_or_epi32() { + const unsafe fn test_mm_mask_or_epi32() { let a = _mm_set1_epi32(1 << 1 | 1 << 2); let b = _mm_set1_epi32(1 << 1); let r = _mm_mask_or_epi32(a, 0, a, b); @@ -55602,7 +57486,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_or_epi32() { + const unsafe fn test_mm_maskz_or_epi32() { let a = _mm_set1_epi32(1 << 1 | 1 << 2); let b = _mm_set1_epi32(1 << 1); let r = _mm_maskz_or_epi32(0, a, b); @@ -55613,7 +57497,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_or_si512() { + const unsafe fn test_mm512_or_si512() { #[rustfmt::skip] let a = _mm512_set_epi32( 1 << 1 | 1 << 2, 0, 0, 0, @@ -55640,7 +57524,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_xor_epi32() { + const unsafe fn test_mm512_xor_epi32() { #[rustfmt::skip] let a = _mm512_set_epi32( 1 << 1 | 1 << 2, 0, 0, 0, @@ -55667,7 +57551,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_xor_epi32() { + const unsafe fn test_mm512_mask_xor_epi32() { #[rustfmt::skip] let a = _mm512_set_epi32( 1 << 1 | 1 << 2, 0, 0, 0, @@ -55696,7 +57580,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_xor_epi32() { + const unsafe fn test_mm512_maskz_xor_epi32() { #[rustfmt::skip] let a = _mm512_set_epi32( 1 << 1 | 1 << 2, 0, 0, 0, @@ -55719,7 +57603,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_xor_epi32() { + const unsafe fn test_mm256_xor_epi32() { let a = _mm256_set1_epi32(1 << 1 | 1 << 2); let b = _mm256_set1_epi32(1 << 1); let r = _mm256_xor_epi32(a, b); @@ -55728,7 +57612,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_xor_epi32() { + const unsafe fn test_mm256_mask_xor_epi32() { let a = _mm256_set1_epi32(1 << 1 | 1 << 2); let b = _mm256_set1_epi32(1 << 1); let r = _mm256_mask_xor_epi32(a, 0, a, b); @@ -55739,7 +57623,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_xor_epi32() { + const unsafe fn test_mm256_maskz_xor_epi32() { let a = _mm256_set1_epi32(1 << 1 | 1 << 2); let b = _mm256_set1_epi32(1 << 1); let r = _mm256_maskz_xor_epi32(0, a, b); @@ -55750,7 +57634,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_xor_epi32() { + const unsafe fn test_mm_xor_epi32() { let a = _mm_set1_epi32(1 << 1 | 1 << 2); let b = _mm_set1_epi32(1 << 1); let r = _mm_xor_epi32(a, b); @@ -55759,7 +57643,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_xor_epi32() { + const unsafe fn test_mm_mask_xor_epi32() { let a = _mm_set1_epi32(1 << 1 | 1 << 2); let b = _mm_set1_epi32(1 << 1); let r = _mm_mask_xor_epi32(a, 0, a, b); @@ -55770,7 +57654,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_xor_epi32() { + const unsafe fn test_mm_maskz_xor_epi32() { let a = _mm_set1_epi32(1 << 1 | 1 << 2); let b = _mm_set1_epi32(1 << 1); let r = _mm_maskz_xor_epi32(0, a, b); @@ -55781,7 +57665,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_xor_si512() { + const unsafe fn test_mm512_xor_si512() { #[rustfmt::skip] let a = _mm512_set_epi32( 1 << 1 | 1 << 2, 0, 0, 0, @@ -55808,7 +57692,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn 
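The bitwise lane operations fold the same way; a sketch under the same assumptions (`stdarch_const_x86` gate, const `_mm512_set1_epi32`):

    #![feature(stdarch_const_x86)]
    use core::arch::x86_64::*;

    const A: __m512i = unsafe { _mm512_set1_epi32(1 << 1 | 1 << 2) };
    const B: __m512i = unsafe { _mm512_set1_epi32(1 << 1) };
    const AND: __m512i = unsafe { _mm512_and_epi32(A, B) }; // every lane: 1 << 1
    const XOR: __m512i = unsafe { _mm512_xor_epi32(A, B) }; // every lane: 1 << 2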
test_mm512_andnot_epi32() { + const unsafe fn test_mm512_andnot_epi32() { let a = _mm512_set1_epi32(0); let b = _mm512_set1_epi32(1 << 3 | 1 << 4); let r = _mm512_andnot_epi32(a, b); @@ -55817,7 +57701,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_andnot_epi32() { + const unsafe fn test_mm512_mask_andnot_epi32() { let a = _mm512_set1_epi32(1 << 1 | 1 << 2); let b = _mm512_set1_epi32(1 << 3 | 1 << 4); let r = _mm512_mask_andnot_epi32(a, 0, a, b); @@ -55828,7 +57712,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_andnot_epi32() { + const unsafe fn test_mm512_maskz_andnot_epi32() { let a = _mm512_set1_epi32(1 << 1 | 1 << 2); let b = _mm512_set1_epi32(1 << 3 | 1 << 4); let r = _mm512_maskz_andnot_epi32(0, a, b); @@ -55845,7 +57729,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_andnot_epi32() { + const unsafe fn test_mm256_mask_andnot_epi32() { let a = _mm256_set1_epi32(1 << 1 | 1 << 2); let b = _mm256_set1_epi32(1 << 3 | 1 << 4); let r = _mm256_mask_andnot_epi32(a, 0, a, b); @@ -55856,7 +57740,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_andnot_epi32() { + const unsafe fn test_mm256_maskz_andnot_epi32() { let a = _mm256_set1_epi32(1 << 1 | 1 << 2); let b = _mm256_set1_epi32(1 << 3 | 1 << 4); let r = _mm256_maskz_andnot_epi32(0, a, b); @@ -55867,7 +57751,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_andnot_epi32() { + const unsafe fn test_mm_mask_andnot_epi32() { let a = _mm_set1_epi32(1 << 1 | 1 << 2); let b = _mm_set1_epi32(1 << 3 | 1 << 4); let r = _mm_mask_andnot_epi32(a, 0, a, b); @@ -55878,7 +57762,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_andnot_epi32() { + const unsafe fn test_mm_maskz_andnot_epi32() { let a = _mm_set1_epi32(1 << 1 | 1 << 2); let b = _mm_set1_epi32(1 << 3 | 1 << 4); let r = _mm_maskz_andnot_epi32(0, a, b); @@ -55889,7 +57773,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_cvtmask16_u32() { + const unsafe fn test_cvtmask16_u32() { let a: __mmask16 = 0b11001100_00110011; let r = _cvtmask16_u32(a); let e: u32 = 0b11001100_00110011; @@ -55897,7 +57781,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_cvtu32_mask16() { + const unsafe fn test_cvtu32_mask16() { let a: u32 = 0b11001100_00110011; let r = _cvtu32_mask16(a); let e: __mmask16 = 0b11001100_00110011; @@ -55905,7 +57789,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_kand() { + const unsafe fn test_mm512_kand() { let a: u16 = 0b11001100_00110011; let b: u16 = 0b11001100_00110011; let r = _mm512_kand(a, b); @@ -55914,7 +57798,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_kand_mask16() { + const unsafe fn test_kand_mask16() { let a: u16 = 0b11001100_00110011; let b: u16 = 0b11001100_00110011; let r = _kand_mask16(a, b); @@ -55923,7 +57807,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_kor() { + const unsafe fn test_mm512_kor() { let a: u16 = 0b11001100_00110011; let b: u16 = 0b00101110_00001011; let r = _mm512_kor(a, b); @@ -55932,7 +57816,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_kor_mask16() { + const unsafe fn test_kor_mask16() { let a: u16 = 0b11001100_00110011; let b: u16 = 0b00101110_00001011; let r = _kor_mask16(a, b); @@ -55941,7 +57825,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_kxor() { + const unsafe 
fn test_mm512_kxor() { let a: u16 = 0b11001100_00110011; let b: u16 = 0b00101110_00001011; let r = _mm512_kxor(a, b); @@ -55950,7 +57834,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_kxor_mask16() { + const unsafe fn test_kxor_mask16() { let a: u16 = 0b11001100_00110011; let b: u16 = 0b00101110_00001011; let r = _kxor_mask16(a, b); @@ -55959,7 +57843,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_knot() { + const unsafe fn test_mm512_knot() { let a: u16 = 0b11001100_00110011; let r = _mm512_knot(a); let e: u16 = 0b00110011_11001100; @@ -55967,7 +57851,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_knot_mask16() { + const unsafe fn test_knot_mask16() { let a: u16 = 0b11001100_00110011; let r = _knot_mask16(a); let e: u16 = 0b00110011_11001100; @@ -55975,7 +57859,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_kandn() { + const unsafe fn test_mm512_kandn() { let a: u16 = 0b11001100_00110011; let b: u16 = 0b00101110_00001011; let r = _mm512_kandn(a, b); @@ -55984,7 +57868,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_kandn_mask16() { + const unsafe fn test_kandn_mask16() { let a: u16 = 0b11001100_00110011; let b: u16 = 0b00101110_00001011; let r = _kandn_mask16(a, b); @@ -55993,7 +57877,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_kxnor() { + const unsafe fn test_mm512_kxnor() { let a: u16 = 0b11001100_00110011; let b: u16 = 0b00101110_00001011; let r = _mm512_kxnor(a, b); @@ -56002,7 +57886,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_kxnor_mask16() { + const unsafe fn test_kxnor_mask16() { let a: u16 = 0b11001100_00110011; let b: u16 = 0b00101110_00001011; let r = _kxnor_mask16(a, b); @@ -56011,7 +57895,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_kortest_mask16_u8() { + const unsafe fn test_kortest_mask16_u8() { let a: __mmask16 = 0b0110100101101001; let b: __mmask16 = 0b1011011010110110; let mut all_ones: u8 = 0; @@ -56021,7 +57905,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_kortestc_mask16_u8() { + const unsafe fn test_kortestc_mask16_u8() { let a: __mmask16 = 0b0110100101101001; let b: __mmask16 = 0b1011011010110110; let r = _kortestc_mask16_u8(a, b); @@ -56029,7 +57913,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_kortestz_mask16_u8() { + const unsafe fn test_kortestz_mask16_u8() { let a: __mmask16 = 0b0110100101101001; let b: __mmask16 = 0b1011011010110110; let r = _kortestz_mask16_u8(a, b); @@ -56037,7 +57921,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_kshiftli_mask16() { + const unsafe fn test_kshiftli_mask16() { let a: __mmask16 = 0b1001011011000011; let r = _kshiftli_mask16::<3>(a); let e: __mmask16 = 0b1011011000011000; @@ -56057,7 +57941,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_kshiftri_mask16() { + const unsafe fn test_kshiftri_mask16() { let a: __mmask16 = 0b1010100100111100; let r = _kshiftri_mask16::<3>(a); let e: __mmask16 = 0b0001010100100111; @@ -56077,7 +57961,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_load_mask16() { + const unsafe fn test_load_mask16() { let a: __mmask16 = 0b1001011011000011; let r = _load_mask16(&a); let e: __mmask16 = 0b1001011011000011; @@ -56085,7 +57969,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_store_mask16() { + const unsafe fn test_store_mask16() { let a: __mmask16 = 0b0110100100111100; let mut r 
= 0; _store_mask16(&mut r, a); @@ -56094,7 +57978,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_kmov() { + const unsafe fn test_mm512_kmov() { let a: u16 = 0b11001100_00110011; let r = _mm512_kmov(a); let e: u16 = 0b11001100_00110011; @@ -56102,7 +57986,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_int2mask() { + const unsafe fn test_mm512_int2mask() { let a: i32 = 0b11001100_00110011; let r = _mm512_int2mask(a); let e: u16 = 0b11001100_00110011; @@ -56110,7 +57994,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask2int() { + const unsafe fn test_mm512_mask2int() { let k1: __mmask16 = 0b11001100_00110011; let r = _mm512_mask2int(k1); let e: i32 = 0b11001100_00110011; @@ -56118,7 +58002,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_kunpackb() { + const unsafe fn test_mm512_kunpackb() { let a: u16 = 0b11001100_00110011; let b: u16 = 0b00101110_00001011; let r = _mm512_kunpackb(a, b); @@ -56127,7 +58011,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_kortestc() { + const unsafe fn test_mm512_kortestc() { let a: u16 = 0b11001100_00110011; let b: u16 = 0b00101110_00001011; let r = _mm512_kortestc(a, b); @@ -56138,7 +58022,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_kortestz() { + const unsafe fn test_mm512_kortestz() { let a: u16 = 0b11001100_00110011; let b: u16 = 0b00101110_00001011; let r = _mm512_kortestz(a, b); @@ -56148,7 +58032,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_test_epi32_mask() { + const unsafe fn test_mm512_test_epi32_mask() { let a = _mm512_set1_epi32(1 << 0); let b = _mm512_set1_epi32(1 << 0 | 1 << 1); let r = _mm512_test_epi32_mask(a, b); @@ -56157,7 +58041,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_test_epi32_mask() { + const unsafe fn test_mm512_mask_test_epi32_mask() { let a = _mm512_set1_epi32(1 << 0); let b = _mm512_set1_epi32(1 << 0 | 1 << 1); let r = _mm512_mask_test_epi32_mask(0, a, b); @@ -56168,7 +58052,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_test_epi32_mask() { + const unsafe fn test_mm256_test_epi32_mask() { let a = _mm256_set1_epi32(1 << 0); let b = _mm256_set1_epi32(1 << 0 | 1 << 1); let r = _mm256_test_epi32_mask(a, b); @@ -56177,7 +58061,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_test_epi32_mask() { + const unsafe fn test_mm256_mask_test_epi32_mask() { let a = _mm256_set1_epi32(1 << 0); let b = _mm256_set1_epi32(1 << 0 | 1 << 1); let r = _mm256_mask_test_epi32_mask(0, a, b); @@ -56188,7 +58072,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_test_epi32_mask() { + const unsafe fn test_mm_test_epi32_mask() { let a = _mm_set1_epi32(1 << 0); let b = _mm_set1_epi32(1 << 0 | 1 << 1); let r = _mm_test_epi32_mask(a, b); @@ -56197,7 +58081,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_test_epi32_mask() { + const unsafe fn test_mm_mask_test_epi32_mask() { let a = _mm_set1_epi32(1 << 0); let b = _mm_set1_epi32(1 << 0 | 1 << 1); let r = _mm_mask_test_epi32_mask(0, a, b); @@ -56208,7 +58092,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_testn_epi32_mask() { + const unsafe fn test_mm512_testn_epi32_mask() { let a = _mm512_set1_epi32(1 << 0); let b = _mm512_set1_epi32(1 << 0 | 1 << 1); let r = _mm512_testn_epi32_mask(a, b); @@ -56217,7 +58101,7 @@ 
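The k-mask intrinsics converted here operate on plain `u16` values (`__mmask16`), so once they are `const fn` they fold to constants; a sketch assuming the `stdarch_const_x86` gate, with the expected values taken from the tests above:

    #![feature(stdarch_const_x86)]
    use core::arch::x86_64::*;

    const A: __mmask16 = 0b11001100_00110011;
    const B: __mmask16 = 0b00101110_00001011;
    const K_AND: __mmask16 = unsafe { _kand_mask16(A, B) }; // 0b00001100_00000011
    const K_NOT: __mmask16 = unsafe { _knot_mask16(A) };    // 0b00110011_11001100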
mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_testn_epi32_mask() { + const unsafe fn test_mm512_mask_testn_epi32_mask() { let a = _mm512_set1_epi32(1 << 0); let b = _mm512_set1_epi32(1 << 1); let r = _mm512_mask_test_epi32_mask(0, a, b); @@ -56228,7 +58112,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_testn_epi32_mask() { + const unsafe fn test_mm256_testn_epi32_mask() { let a = _mm256_set1_epi32(1 << 0); let b = _mm256_set1_epi32(1 << 1); let r = _mm256_testn_epi32_mask(a, b); @@ -56237,7 +58121,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_testn_epi32_mask() { + const unsafe fn test_mm256_mask_testn_epi32_mask() { let a = _mm256_set1_epi32(1 << 0); let b = _mm256_set1_epi32(1 << 1); let r = _mm256_mask_test_epi32_mask(0, a, b); @@ -56248,7 +58132,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_testn_epi32_mask() { + const unsafe fn test_mm_testn_epi32_mask() { let a = _mm_set1_epi32(1 << 0); let b = _mm_set1_epi32(1 << 1); let r = _mm_testn_epi32_mask(a, b); @@ -56257,7 +58141,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_testn_epi32_mask() { + const unsafe fn test_mm_mask_testn_epi32_mask() { let a = _mm_set1_epi32(1 << 0); let b = _mm_set1_epi32(1 << 1); let r = _mm_mask_test_epi32_mask(0, a, b); @@ -56326,84 +58210,84 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_add_epi32() { + const unsafe fn test_mm512_reduce_add_epi32() { let a = _mm512_set1_epi32(1); let e: i32 = _mm512_reduce_add_epi32(a); assert_eq!(16, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_add_epi32() { + const unsafe fn test_mm512_mask_reduce_add_epi32() { let a = _mm512_set1_epi32(1); let e: i32 = _mm512_mask_reduce_add_epi32(0b11111111_00000000, a); assert_eq!(8, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_add_ps() { + const unsafe fn test_mm512_reduce_add_ps() { let a = _mm512_set1_ps(1.); let e: f32 = _mm512_reduce_add_ps(a); assert_eq!(16., e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_add_ps() { + const unsafe fn test_mm512_mask_reduce_add_ps() { let a = _mm512_set1_ps(1.); let e: f32 = _mm512_mask_reduce_add_ps(0b11111111_00000000, a); assert_eq!(8., e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_mul_epi32() { + const unsafe fn test_mm512_reduce_mul_epi32() { let a = _mm512_set1_epi32(2); let e: i32 = _mm512_reduce_mul_epi32(a); assert_eq!(65536, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_mul_epi32() { + const unsafe fn test_mm512_mask_reduce_mul_epi32() { let a = _mm512_set1_epi32(2); let e: i32 = _mm512_mask_reduce_mul_epi32(0b11111111_00000000, a); assert_eq!(256, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_mul_ps() { + const unsafe fn test_mm512_reduce_mul_ps() { let a = _mm512_set1_ps(2.); let e: f32 = _mm512_reduce_mul_ps(a); assert_eq!(65536., e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_mul_ps() { + const unsafe fn test_mm512_mask_reduce_mul_ps() { let a = _mm512_set1_ps(2.); let e: f32 = _mm512_mask_reduce_mul_ps(0b11111111_00000000, a); assert_eq!(256., e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_max_epi32() { + const unsafe fn test_mm512_reduce_max_epi32() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: i32 = _mm512_reduce_max_epi32(a); 
assert_eq!(15, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_max_epi32() { + const unsafe fn test_mm512_mask_reduce_max_epi32() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: i32 = _mm512_mask_reduce_max_epi32(0b11111111_00000000, a); assert_eq!(7, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_max_epu32() { + const unsafe fn test_mm512_reduce_max_epu32() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: u32 = _mm512_reduce_max_epu32(a); assert_eq!(15, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_max_epu32() { + const unsafe fn test_mm512_mask_reduce_max_epu32() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: u32 = _mm512_mask_reduce_max_epu32(0b11111111_00000000, a); assert_eq!(7, e); @@ -56428,28 +58312,28 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_min_epi32() { + const unsafe fn test_mm512_reduce_min_epi32() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: i32 = _mm512_reduce_min_epi32(a); assert_eq!(0, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_min_epi32() { + const unsafe fn test_mm512_mask_reduce_min_epi32() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: i32 = _mm512_mask_reduce_min_epi32(0b11111111_00000000, a); assert_eq!(0, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_min_epu32() { + const unsafe fn test_mm512_reduce_min_epu32() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: u32 = _mm512_reduce_min_epu32(a); assert_eq!(0, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_min_epu32() { + const unsafe fn test_mm512_mask_reduce_min_epu32() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let e: u32 = _mm512_mask_reduce_min_epu32(0b11111111_00000000, a); assert_eq!(0, e); @@ -56474,28 +58358,28 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_and_epi32() { + const unsafe fn test_mm512_reduce_and_epi32() { let a = _mm512_set_epi32(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2); let e: i32 = _mm512_reduce_and_epi32(a); assert_eq!(0, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_and_epi32() { + const unsafe fn test_mm512_mask_reduce_and_epi32() { let a = _mm512_set_epi32(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2); let e: i32 = _mm512_mask_reduce_and_epi32(0b11111111_00000000, a); assert_eq!(1, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_or_epi32() { + const unsafe fn test_mm512_reduce_or_epi32() { let a = _mm512_set_epi32(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2); let e: i32 = _mm512_reduce_or_epi32(a); assert_eq!(3, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_or_epi32() { + const unsafe fn test_mm512_mask_reduce_or_epi32() { let a = _mm512_set_epi32(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2); let e: i32 = _mm512_mask_reduce_and_epi32(0b11111111_00000000, a); assert_eq!(1, e); @@ -56906,7 +58790,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_loadu_epi32() { + const unsafe fn test_mm512_loadu_epi32() { let a = &[4, 3, 2, 5, 8, 9, 64, 50, -4, -3, -2, -5, -8, -9, -64, -50]; let p = a.as_ptr(); let r = _mm512_loadu_epi32(black_box(p)); @@ -56915,7 +58799,7 @@ mod tests { 
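The horizontal reductions likewise become usable in const items; a sketch assuming the `stdarch_const_x86` gate and a const `_mm512_set1_epi32`, with the results matching the test expectations above:

    #![feature(stdarch_const_x86)]
    use core::arch::x86_64::*;

    const SUM: i32 = unsafe { _mm512_reduce_add_epi32(_mm512_set1_epi32(1)) }; // 16
    const HI_SUM: i32 =
        unsafe { _mm512_mask_reduce_add_epi32(0b11111111_00000000, _mm512_set1_epi32(1)) }; // 8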
} #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_loadu_epi32() { + const unsafe fn test_mm256_loadu_epi32() { let a = &[4, 3, 2, 5, 8, 9, 64, 50]; let p = a.as_ptr(); let r = _mm256_loadu_epi32(black_box(p)); @@ -56924,7 +58808,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_loadu_epi32() { + const unsafe fn test_mm_loadu_epi32() { let a = &[4, 3, 2, 5]; let p = a.as_ptr(); let r = _mm_loadu_epi32(black_box(p)); @@ -57128,7 +59012,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_storeu_epi32() { + const unsafe fn test_mm512_storeu_epi32() { let a = _mm512_set1_epi32(9); let mut r = _mm512_undefined_epi32(); _mm512_storeu_epi32(&mut r as *mut _ as *mut i32, a); @@ -57136,7 +59020,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_storeu_epi32() { + const unsafe fn test_mm256_storeu_epi32() { let a = _mm256_set1_epi32(9); let mut r = _mm256_undefined_si256(); _mm256_storeu_epi32(&mut r as *mut _ as *mut i32, a); @@ -57144,7 +59028,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_storeu_epi32() { + const unsafe fn test_mm_storeu_epi32() { let a = _mm_set1_epi32(9); let mut r = _mm_undefined_si128(); _mm_storeu_epi32(&mut r as *mut _ as *mut i32, a); @@ -57152,7 +59036,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_loadu_si512() { + const unsafe fn test_mm512_loadu_si512() { let a = &[4, 3, 2, 5, 8, 9, 64, 50, -4, -3, -2, -5, -8, -9, -64, -50]; let p = a.as_ptr().cast(); let r = _mm512_loadu_si512(black_box(p)); @@ -57161,7 +59045,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_storeu_si512() { + const unsafe fn test_mm512_storeu_si512() { let a = _mm512_set1_epi32(9); let mut r = _mm512_undefined_epi32(); _mm512_storeu_si512(&mut r as *mut _, a); @@ -57169,7 +59053,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_load_si512() { + const unsafe fn test_mm512_load_si512() { #[repr(align(64))] struct Align { data: [i32; 16], // 64 bytes @@ -57184,7 +59068,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_store_si512() { + const unsafe fn test_mm512_store_si512() { let a = _mm512_set1_epi32(9); let mut r = _mm512_undefined_epi32(); _mm512_store_si512(&mut r as *mut _, a); @@ -57192,7 +59076,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_load_epi32() { + const unsafe fn test_mm512_load_epi32() { #[repr(align(64))] struct Align { data: [i32; 16], // 64 bytes @@ -57207,7 +59091,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_load_epi32() { + const unsafe fn test_mm256_load_epi32() { #[repr(align(64))] struct Align { data: [i32; 8], @@ -57222,7 +59106,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_load_epi32() { + const unsafe fn test_mm_load_epi32() { #[repr(align(64))] struct Align { data: [i32; 4], @@ -57235,7 +59119,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_store_epi32() { + const unsafe fn test_mm512_store_epi32() { let a = _mm512_set1_epi32(9); let mut r = _mm512_undefined_epi32(); _mm512_store_epi32(&mut r as *mut _ as *mut i32, a); @@ -57243,7 +59127,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_store_epi32() { + const unsafe fn test_mm256_store_epi32() { let a = _mm256_set1_epi32(9); let mut r = _mm256_undefined_si256(); _mm256_store_epi32(&mut r as *mut _ as *mut i32, a); @@ 
-57251,7 +59135,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_store_epi32() { + const unsafe fn test_mm_store_epi32() { let a = _mm_set1_epi32(9); let mut r = _mm_undefined_si128(); _mm_store_epi32(&mut r as *mut _ as *mut i32, a); @@ -57259,7 +59143,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_load_ps() { + const unsafe fn test_mm512_load_ps() { #[repr(align(64))] struct Align { data: [f32; 16], // 64 bytes @@ -57278,7 +59162,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_store_ps() { + const unsafe fn test_mm512_store_ps() { let a = _mm512_set1_ps(9.); let mut r = _mm512_undefined_ps(); _mm512_store_ps(&mut r as *mut _ as *mut f32, a); @@ -57286,7 +59170,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_set1_epi32() { + const unsafe fn test_mm512_mask_set1_epi32() { let src = _mm512_set1_epi32(2); let a: i32 = 11; let r = _mm512_mask_set1_epi32(src, 0, a); @@ -57297,7 +59181,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_set1_epi32() { + const unsafe fn test_mm512_maskz_set1_epi32() { let a: i32 = 11; let r = _mm512_maskz_set1_epi32(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -57307,7 +59191,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_set1_epi32() { + const unsafe fn test_mm256_mask_set1_epi32() { let src = _mm256_set1_epi32(2); let a: i32 = 11; let r = _mm256_mask_set1_epi32(src, 0, a); @@ -57318,7 +59202,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_set1_epi32() { + const unsafe fn test_mm256_maskz_set1_epi32() { let a: i32 = 11; let r = _mm256_maskz_set1_epi32(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -57328,7 +59212,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_set1_epi32() { + const unsafe fn test_mm_mask_set1_epi32() { let src = _mm_set1_epi32(2); let a: i32 = 11; let r = _mm_mask_set1_epi32(src, 0, a); @@ -57339,7 +59223,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_set1_epi32() { + const unsafe fn test_mm_maskz_set1_epi32() { let a: i32 = 11; let r = _mm_maskz_set1_epi32(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -57349,7 +59233,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask_move_ss() { + const unsafe fn test_mm_mask_move_ss() { let src = _mm_set_ps(10., 11., 100., 110.); let a = _mm_set_ps(1., 2., 10., 20.); let b = _mm_set_ps(3., 4., 30., 40.); @@ -57362,7 +59246,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_maskz_move_ss() { + const unsafe fn test_mm_maskz_move_ss() { let a = _mm_set_ps(1., 2., 10., 20.); let b = _mm_set_ps(3., 4., 30., 40.); let r = _mm_maskz_move_ss(0, a, b); @@ -57374,7 +59258,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask_move_sd() { + const unsafe fn test_mm_mask_move_sd() { let src = _mm_set_pd(10., 11.); let a = _mm_set_pd(1., 2.); let b = _mm_set_pd(3., 4.); @@ -57387,7 +59271,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_maskz_move_sd() { + const unsafe fn test_mm_maskz_move_sd() { let a = _mm_set_pd(1., 2.); let b = _mm_set_pd(3., 4.); let r = _mm_maskz_move_sd(0, a, b); @@ -57399,7 +59283,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask_add_ss() { + const unsafe fn test_mm_mask_add_ss() { let src = _mm_set_ps(10., 11., 100., 110.); let a = _mm_set_ps(1., 2., 
10., 20.); let b = _mm_set_ps(3., 4., 30., 40.); @@ -57412,7 +59296,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_maskz_add_ss() { + const unsafe fn test_mm_maskz_add_ss() { let a = _mm_set_ps(1., 2., 10., 20.); let b = _mm_set_ps(3., 4., 30., 40.); let r = _mm_maskz_add_ss(0, a, b); @@ -57424,7 +59308,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask_add_sd() { + const unsafe fn test_mm_mask_add_sd() { let src = _mm_set_pd(10., 11.); let a = _mm_set_pd(1., 2.); let b = _mm_set_pd(3., 4.); @@ -57437,7 +59321,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_maskz_add_sd() { + const unsafe fn test_mm_maskz_add_sd() { let a = _mm_set_pd(1., 2.); let b = _mm_set_pd(3., 4.); let r = _mm_maskz_add_sd(0, a, b); @@ -57449,7 +59333,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask_sub_ss() { + const unsafe fn test_mm_mask_sub_ss() { let src = _mm_set_ps(10., 11., 100., 110.); let a = _mm_set_ps(1., 2., 10., 20.); let b = _mm_set_ps(3., 4., 30., 40.); @@ -57462,7 +59346,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_maskz_sub_ss() { + const unsafe fn test_mm_maskz_sub_ss() { let a = _mm_set_ps(1., 2., 10., 20.); let b = _mm_set_ps(3., 4., 30., 40.); let r = _mm_maskz_sub_ss(0, a, b); @@ -57474,7 +59358,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask_sub_sd() { + const unsafe fn test_mm_mask_sub_sd() { let src = _mm_set_pd(10., 11.); let a = _mm_set_pd(1., 2.); let b = _mm_set_pd(3., 4.); @@ -57487,7 +59371,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_maskz_sub_sd() { + const unsafe fn test_mm_maskz_sub_sd() { let a = _mm_set_pd(1., 2.); let b = _mm_set_pd(3., 4.); let r = _mm_maskz_sub_sd(0, a, b); @@ -57499,7 +59383,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask_mul_ss() { + const unsafe fn test_mm_mask_mul_ss() { let src = _mm_set_ps(10., 11., 100., 110.); let a = _mm_set_ps(1., 2., 10., 20.); let b = _mm_set_ps(3., 4., 30., 40.); @@ -57512,7 +59396,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_maskz_mul_ss() { + const unsafe fn test_mm_maskz_mul_ss() { let a = _mm_set_ps(1., 2., 10., 20.); let b = _mm_set_ps(3., 4., 30., 40.); let r = _mm_maskz_mul_ss(0, a, b); @@ -57524,7 +59408,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask_mul_sd() { + const unsafe fn test_mm_mask_mul_sd() { let src = _mm_set_pd(10., 11.); let a = _mm_set_pd(1., 2.); let b = _mm_set_pd(3., 4.); @@ -57537,7 +59421,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_maskz_mul_sd() { + const unsafe fn test_mm_maskz_mul_sd() { let a = _mm_set_pd(1., 2.); let b = _mm_set_pd(3., 4.); let r = _mm_maskz_mul_sd(0, a, b); @@ -57549,7 +59433,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask_div_ss() { + const unsafe fn test_mm_mask_div_ss() { let src = _mm_set_ps(10., 11., 100., 110.); let a = _mm_set_ps(1., 2., 10., 20.); let b = _mm_set_ps(3., 4., 30., 40.); @@ -57562,7 +59446,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_maskz_div_ss() { + const unsafe fn test_mm_maskz_div_ss() { let a = _mm_set_ps(1., 2., 10., 20.); let b = _mm_set_ps(3., 4., 30., 40.); let r = _mm_maskz_div_ss(0, a, b); @@ -57574,7 +59458,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask_div_sd() { + const unsafe fn test_mm_mask_div_sd() { let src = _mm_set_pd(10., 11.); let a = _mm_set_pd(1., 2.); let 
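For the masked scalar arithmetic, a sketch under the same assumptions (`stdarch_const_x86` gate, const `_mm_set_ps`); the upper three lanes copy the first operand, and the low lane is the sum when mask bit 0 is set, else zero:

    #![feature(stdarch_const_x86)]
    use core::arch::x86_64::*;

    const R: __m128 = unsafe {
        _mm_maskz_add_ss(0b1, _mm_set_ps(1., 2., 10., 20.), _mm_set_ps(3., 4., 30., 40.))
    }; // (1., 2., 10., 60.) in `_mm_set_ps` argument order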
b = _mm_set_pd(3., 4.); @@ -57587,7 +59471,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_maskz_div_sd() { + const unsafe fn test_mm_maskz_div_sd() { let a = _mm_set_pd(1., 2.); let b = _mm_set_pd(3., 4.); let r = _mm_maskz_div_sd(0, a, b); @@ -58145,7 +60029,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask_fmadd_ss() { + const unsafe fn test_mm_mask_fmadd_ss() { let a = _mm_set1_ps(1.); let b = _mm_set1_ps(2.); let c = _mm_set1_ps(3.); @@ -58157,7 +60041,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_maskz_fmadd_ss() { + const unsafe fn test_mm_maskz_fmadd_ss() { let a = _mm_set1_ps(1.); let b = _mm_set1_ps(2.); let c = _mm_set1_ps(3.); @@ -58170,7 +60054,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask3_fmadd_ss() { + const unsafe fn test_mm_mask3_fmadd_ss() { let a = _mm_set1_ps(1.); let b = _mm_set1_ps(2.); let c = _mm_set1_ps(3.); @@ -58182,7 +60066,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask_fmadd_sd() { + const unsafe fn test_mm_mask_fmadd_sd() { let a = _mm_set1_pd(1.); let b = _mm_set1_pd(2.); let c = _mm_set1_pd(3.); @@ -58194,7 +60078,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_maskz_fmadd_sd() { + const unsafe fn test_mm_maskz_fmadd_sd() { let a = _mm_set1_pd(1.); let b = _mm_set1_pd(2.); let c = _mm_set1_pd(3.); @@ -58207,7 +60091,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask3_fmadd_sd() { + const unsafe fn test_mm_mask3_fmadd_sd() { let a = _mm_set1_pd(1.); let b = _mm_set1_pd(2.); let c = _mm_set1_pd(3.); @@ -58219,7 +60103,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask_fmsub_ss() { + const unsafe fn test_mm_mask_fmsub_ss() { let a = _mm_set1_ps(1.); let b = _mm_set1_ps(2.); let c = _mm_set1_ps(3.); @@ -58231,7 +60115,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_maskz_fmsub_ss() { + const unsafe fn test_mm_maskz_fmsub_ss() { let a = _mm_set1_ps(1.); let b = _mm_set1_ps(2.); let c = _mm_set1_ps(3.); @@ -58244,7 +60128,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask3_fmsub_ss() { + const unsafe fn test_mm_mask3_fmsub_ss() { let a = _mm_set1_ps(1.); let b = _mm_set1_ps(2.); let c = _mm_set1_ps(3.); @@ -58256,7 +60140,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask_fmsub_sd() { + const unsafe fn test_mm_mask_fmsub_sd() { let a = _mm_set1_pd(1.); let b = _mm_set1_pd(2.); let c = _mm_set1_pd(3.); @@ -58268,7 +60152,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_maskz_fmsub_sd() { + const unsafe fn test_mm_maskz_fmsub_sd() { let a = _mm_set1_pd(1.); let b = _mm_set1_pd(2.); let c = _mm_set1_pd(3.); @@ -58281,7 +60165,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask3_fmsub_sd() { + const unsafe fn test_mm_mask3_fmsub_sd() { let a = _mm_set1_pd(1.); let b = _mm_set1_pd(2.); let c = _mm_set1_pd(3.); @@ -58293,7 +60177,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask_fnmadd_ss() { + const unsafe fn test_mm_mask_fnmadd_ss() { let a = _mm_set1_ps(1.); let b = _mm_set1_ps(2.); let c = _mm_set1_ps(3.); @@ -58305,7 +60189,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_maskz_fnmadd_ss() { + const unsafe fn test_mm_maskz_fnmadd_ss() { let a = _mm_set1_ps(1.); let b = _mm_set1_ps(2.); let c = _mm_set1_ps(3.); @@ -58318,7 +60202,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - 
unsafe fn test_mm_mask3_fnmadd_ss() { + const unsafe fn test_mm_mask3_fnmadd_ss() { let a = _mm_set1_ps(1.); let b = _mm_set1_ps(2.); let c = _mm_set1_ps(3.); @@ -58330,7 +60214,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask_fnmadd_sd() { + const unsafe fn test_mm_mask_fnmadd_sd() { let a = _mm_set1_pd(1.); let b = _mm_set1_pd(2.); let c = _mm_set1_pd(3.); @@ -58342,7 +60226,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_maskz_fnmadd_sd() { + const unsafe fn test_mm_maskz_fnmadd_sd() { let a = _mm_set1_pd(1.); let b = _mm_set1_pd(2.); let c = _mm_set1_pd(3.); @@ -58355,7 +60239,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask3_fnmadd_sd() { + const unsafe fn test_mm_mask3_fnmadd_sd() { let a = _mm_set1_pd(1.); let b = _mm_set1_pd(2.); let c = _mm_set1_pd(3.); @@ -58367,7 +60251,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask_fnmsub_ss() { + const unsafe fn test_mm_mask_fnmsub_ss() { let a = _mm_set1_ps(1.); let b = _mm_set1_ps(2.); let c = _mm_set1_ps(3.); @@ -58379,7 +60263,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_maskz_fnmsub_ss() { + const unsafe fn test_mm_maskz_fnmsub_ss() { let a = _mm_set1_ps(1.); let b = _mm_set1_ps(2.); let c = _mm_set1_ps(3.); @@ -58392,7 +60276,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask3_fnmsub_ss() { + const unsafe fn test_mm_mask3_fnmsub_ss() { let a = _mm_set1_ps(1.); let b = _mm_set1_ps(2.); let c = _mm_set1_ps(3.); @@ -58404,7 +60288,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask_fnmsub_sd() { + const unsafe fn test_mm_mask_fnmsub_sd() { let a = _mm_set1_pd(1.); let b = _mm_set1_pd(2.); let c = _mm_set1_pd(3.); @@ -58416,7 +60300,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_maskz_fnmsub_sd() { + const unsafe fn test_mm_maskz_fnmsub_sd() { let a = _mm_set1_pd(1.); let b = _mm_set1_pd(2.); let c = _mm_set1_pd(3.); @@ -58429,7 +60313,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_mask3_fnmsub_sd() { + const unsafe fn test_mm_mask3_fnmsub_sd() { let a = _mm_set1_pd(1.); let b = _mm_set1_pd(2.); let c = _mm_set1_pd(3.); @@ -60084,7 +61968,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_cvti32_ss() { + const unsafe fn test_mm_cvti32_ss() { let a = _mm_set_ps(0., -0.5, 1., -1.5); let b: i32 = 9; let r = _mm_cvti32_ss(a, b); @@ -60093,7 +61977,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_cvti32_sd() { + const unsafe fn test_mm_cvti32_sd() { let a = _mm_set_pd(1., -1.5); let b: i32 = 9; let r = _mm_cvti32_sd(a, b); @@ -60182,7 +62066,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_cvtu32_ss() { + const unsafe fn test_mm_cvtu32_ss() { let a = _mm_set_ps(0., -0.5, 1., -1.5); let b: u32 = 9; let r = _mm_cvtu32_ss(a, b); @@ -60191,7 +62075,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_cvtu32_sd() { + const unsafe fn test_mm_cvtu32_sd() { let a = _mm_set_pd(1., -1.5); let b: u32 = 9; let r = _mm_cvtu32_sd(a, b); @@ -60218,7 +62102,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtsi512_si32() { + const unsafe fn test_mm512_cvtsi512_si32() { let a = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let r = _mm512_cvtsi512_si32(a); let e: i32 = 1; @@ -60226,7 +62110,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtss_f32() { + const unsafe 
fn test_mm512_cvtss_f32() { let a = _mm512_setr_ps( 312.0134, 3., 2., 5., 8., 9., 64., 50., -4., -3., -2., -5., -8., -9., -64., -50., ); @@ -60234,13 +62118,13 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtsd_f64() { + const unsafe fn test_mm512_cvtsd_f64() { let r = _mm512_cvtsd_f64(_mm512_setr_pd(-1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8)); assert_eq!(r, -1.1); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_shuffle_pd() { + const unsafe fn test_mm512_shuffle_pd() { let a = _mm512_setr_pd(1., 4., 5., 8., 1., 4., 5., 8.); let b = _mm512_setr_pd(2., 3., 6., 7., 2., 3., 6., 7.); let r = _mm512_shuffle_pd::<0b11_11_11_11>(a, b); @@ -60249,7 +62133,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_shuffle_pd() { + const unsafe fn test_mm512_mask_shuffle_pd() { let a = _mm512_setr_pd(1., 4., 5., 8., 1., 4., 5., 8.); let b = _mm512_setr_pd(2., 3., 6., 7., 2., 3., 6., 7.); let r = _mm512_mask_shuffle_pd::<0b11_11_11_11>(a, 0, a, b); @@ -60260,7 +62144,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_shuffle_pd() { + const unsafe fn test_mm512_maskz_shuffle_pd() { let a = _mm512_setr_pd(1., 4., 5., 8., 1., 4., 5., 8.); let b = _mm512_setr_pd(2., 3., 6., 7., 2., 3., 6., 7.); let r = _mm512_maskz_shuffle_pd::<0b11_11_11_11>(0, a, b); diff --git a/crates/core_arch/src/x86/avx512fp16.rs b/crates/core_arch/src/x86/avx512fp16.rs index 01b1be364f..de058c3073 100644 --- a/crates/core_arch/src/x86/avx512fp16.rs +++ b/crates/core_arch/src/x86/avx512fp16.rs @@ -9,7 +9,8 @@ use crate::ptr; #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_set_ph( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set_ph( e7: f16, e6: f16, e5: f16, @@ -28,7 +29,8 @@ pub fn _mm_set_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_set_ph( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_set_ph( e15: f16, e14: f16, e13: f16, @@ -57,7 +59,8 @@ pub fn _mm256_set_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_set_ph( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_set_ph( e31: f16, e30: f16, e29: f16, @@ -104,7 +107,8 @@ pub fn _mm512_set_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_set_sh(a: f16) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set_sh(a: f16) -> __m128h { __m128h([a, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) } @@ -114,7 +118,8 @@ pub fn _mm_set_sh(a: f16) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_set1_ph(a: f16) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set1_ph(a: f16) -> __m128h { unsafe { transmute(f16x8::splat(a)) } } @@ -124,7 +129,8 @@ pub fn _mm_set1_ph(a: f16) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_set1_ph(a: f16) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn 
_mm256_set1_ph(a: f16) -> __m256h { unsafe { transmute(f16x16::splat(a)) } } @@ -134,7 +140,8 @@ pub fn _mm256_set1_ph(a: f16) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_set1_ph(a: f16) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_set1_ph(a: f16) -> __m512h { unsafe { transmute(f16x32::splat(a)) } } @@ -144,7 +151,8 @@ pub fn _mm512_set1_ph(a: f16) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_setr_ph( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_setr_ph( e0: f16, e1: f16, e2: f16, @@ -163,7 +171,8 @@ pub fn _mm_setr_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_setr_ph( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_setr_ph( e0: f16, e1: f16, e2: f16, @@ -192,7 +201,8 @@ pub fn _mm256_setr_ph( #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_setr_ph( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_setr_ph( e0: f16, e1: f16, e2: f16, @@ -238,7 +248,8 @@ pub fn _mm512_setr_ph( #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_setzero_ph() -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_setzero_ph() -> __m128h { unsafe { transmute(f16x8::ZERO) } } @@ -248,7 +259,8 @@ pub fn _mm_setzero_ph() -> __m128h { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_setzero_ph() -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_setzero_ph() -> __m256h { f16x16::ZERO.as_m256h() } @@ -258,7 +270,8 @@ pub fn _mm256_setzero_ph() -> __m256h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_setzero_ph() -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_setzero_ph() -> __m512h { f16x32::ZERO.as_m512h() } @@ -271,7 +284,8 @@ pub fn _mm512_setzero_ph() -> __m512h { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_undefined_ph() -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_undefined_ph() -> __m128h { f16x8::ZERO.as_m128h() } @@ -284,7 +298,8 @@ pub fn _mm_undefined_ph() -> __m128h { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_undefined_ph() -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_undefined_ph() -> __m256h { f16x16::ZERO.as_m256h() } @@ -297,7 +312,8 @@ pub fn _mm256_undefined_ph() -> __m256h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_undefined_ph() -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = 
"149298")] +pub const fn _mm512_undefined_ph() -> __m512h { f16x32::ZERO.as_m512h() } @@ -308,7 +324,8 @@ pub fn _mm512_undefined_ph() -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_castpd_ph(a: __m128d) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_castpd_ph(a: __m128d) -> __m128h { unsafe { transmute(a) } } @@ -319,7 +336,8 @@ pub fn _mm_castpd_ph(a: __m128d) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_castpd_ph(a: __m256d) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castpd_ph(a: __m256d) -> __m256h { unsafe { transmute(a) } } @@ -330,7 +348,8 @@ pub fn _mm256_castpd_ph(a: __m256d) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_castpd_ph(a: __m512d) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castpd_ph(a: __m512d) -> __m512h { unsafe { transmute(a) } } @@ -341,7 +360,8 @@ pub fn _mm512_castpd_ph(a: __m512d) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_castph_pd(a: __m128h) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_castph_pd(a: __m128h) -> __m128d { unsafe { transmute(a) } } @@ -352,7 +372,8 @@ pub fn _mm_castph_pd(a: __m128h) -> __m128d { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_castph_pd(a: __m256h) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castph_pd(a: __m256h) -> __m256d { unsafe { transmute(a) } } @@ -363,7 +384,8 @@ pub fn _mm256_castph_pd(a: __m256h) -> __m256d { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_castph_pd(a: __m512h) -> __m512d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castph_pd(a: __m512h) -> __m512d { unsafe { transmute(a) } } @@ -374,7 +396,8 @@ pub fn _mm512_castph_pd(a: __m512h) -> __m512d { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_castps_ph(a: __m128) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_castps_ph(a: __m128) -> __m128h { unsafe { transmute(a) } } @@ -385,7 +408,8 @@ pub fn _mm_castps_ph(a: __m128) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_castps_ph(a: __m256) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castps_ph(a: __m256) -> __m256h { unsafe { transmute(a) } } @@ -396,7 +420,8 @@ pub fn _mm256_castps_ph(a: __m256) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_castps_ph(a: __m512) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castps_ph(a: __m512) -> __m512h { unsafe { 
transmute(a) } } @@ -407,7 +432,8 @@ pub fn _mm512_castps_ph(a: __m512) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_castph_ps(a: __m128h) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_castph_ps(a: __m128h) -> __m128 { unsafe { transmute(a) } } @@ -418,7 +444,8 @@ pub fn _mm_castph_ps(a: __m128h) -> __m128 { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_castph_ps(a: __m256h) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castph_ps(a: __m256h) -> __m256 { unsafe { transmute(a) } } @@ -429,7 +456,8 @@ pub fn _mm256_castph_ps(a: __m256h) -> __m256 { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_castph_ps(a: __m512h) -> __m512 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castph_ps(a: __m512h) -> __m512 { unsafe { transmute(a) } } @@ -440,7 +468,8 @@ pub fn _mm512_castph_ps(a: __m512h) -> __m512 { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_castsi128_ph(a: __m128i) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_castsi128_ph(a: __m128i) -> __m128h { unsafe { transmute(a) } } @@ -451,7 +480,8 @@ pub fn _mm_castsi128_ph(a: __m128i) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_castsi256_ph(a: __m256i) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castsi256_ph(a: __m256i) -> __m256h { unsafe { transmute(a) } } @@ -462,7 +492,8 @@ pub fn _mm256_castsi256_ph(a: __m256i) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_castsi512_ph(a: __m512i) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castsi512_ph(a: __m512i) -> __m512h { unsafe { transmute(a) } } @@ -473,7 +504,8 @@ pub fn _mm512_castsi512_ph(a: __m512i) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_castph_si128(a: __m128h) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_castph_si128(a: __m128h) -> __m128i { unsafe { transmute(a) } } @@ -484,7 +516,8 @@ pub fn _mm_castph_si128(a: __m128h) -> __m128i { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_castph_si256(a: __m256h) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castph_si256(a: __m256h) -> __m256i { unsafe { transmute(a) } } @@ -495,7 +528,8 @@ pub fn _mm256_castph_si256(a: __m256h) -> __m256i { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_castph_si512(a: __m512h) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castph_si512(a: __m512h) -> __m512i { unsafe { transmute(a) 
} } @@ -506,7 +540,8 @@ pub fn _mm512_castph_si512(a: __m512h) -> __m512i { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_castph256_ph128(a: __m256h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castph256_ph128(a: __m256h) -> __m128h { unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } } @@ -517,7 +552,8 @@ pub fn _mm256_castph256_ph128(a: __m256h) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_castph512_ph128(a: __m512h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castph512_ph128(a: __m512h) -> __m128h { unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } } @@ -528,7 +564,8 @@ pub fn _mm512_castph512_ph128(a: __m512h) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_castph512_ph256(a: __m512h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castph512_ph256(a: __m512h) -> __m256h { unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } } @@ -540,7 +577,8 @@ pub fn _mm512_castph512_ph256(a: __m512h) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_castph128_ph256(a: __m128h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_castph128_ph256(a: __m128h) -> __m256h { unsafe { simd_shuffle!( a, @@ -558,7 +596,8 @@ pub fn _mm256_castph128_ph256(a: __m128h) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_castph128_ph512(a: __m128h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castph128_ph512(a: __m128h) -> __m512h { unsafe { simd_shuffle!( a, @@ -579,7 +618,8 @@ pub fn _mm512_castph128_ph512(a: __m128h) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_castph256_ph512(a: __m256h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_castph256_ph512(a: __m256h) -> __m512h { unsafe { simd_shuffle!( a, @@ -600,7 +640,8 @@ pub fn _mm512_castph256_ph512(a: __m256h) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_zextph128_ph256(a: __m128h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_zextph128_ph256(a: __m128h) -> __m256h { unsafe { simd_shuffle!( a, @@ -618,7 +659,8 @@ pub fn _mm256_zextph128_ph256(a: __m128h) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_zextph256_ph512(a: __m256h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_zextph256_ph512(a: __m256h) -> __m512h { unsafe { simd_shuffle!( a, @@ -639,7 +681,8 @@ pub fn _mm512_zextph256_ph512(a: __m256h) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] 
#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_zextph128_ph512(a: __m128h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_zextph128_ph512(a: __m128h) -> __m512h { unsafe { simd_shuffle!( a, @@ -1083,7 +1126,8 @@ pub fn _mm_ucomineq_sh(a: __m128h, b: __m128h) -> i32 { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub unsafe fn _mm_load_ph(mem_addr: *const f16) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_load_ph(mem_addr: *const f16) -> __m128h { *mem_addr.cast() } @@ -1094,7 +1138,8 @@ pub unsafe fn _mm_load_ph(mem_addr: *const f16) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub unsafe fn _mm256_load_ph(mem_addr: *const f16) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_load_ph(mem_addr: *const f16) -> __m256h { *mem_addr.cast() } @@ -1105,7 +1150,8 @@ pub unsafe fn _mm256_load_ph(mem_addr: *const f16) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub unsafe fn _mm512_load_ph(mem_addr: *const f16) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_load_ph(mem_addr: *const f16) -> __m512h { *mem_addr.cast() } @@ -1116,7 +1162,8 @@ pub unsafe fn _mm512_load_ph(mem_addr: *const f16) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub unsafe fn _mm_load_sh(mem_addr: *const f16) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_load_sh(mem_addr: *const f16) -> __m128h { _mm_set_sh(*mem_addr) } @@ -1165,7 +1212,8 @@ pub unsafe fn _mm_maskz_load_sh(k: __mmask8, mem_addr: *const f16) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub unsafe fn _mm_loadu_ph(mem_addr: *const f16) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_loadu_ph(mem_addr: *const f16) -> __m128h { ptr::read_unaligned(mem_addr.cast()) } @@ -1176,7 +1224,8 @@ pub unsafe fn _mm_loadu_ph(mem_addr: *const f16) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub unsafe fn _mm256_loadu_ph(mem_addr: *const f16) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_loadu_ph(mem_addr: *const f16) -> __m256h { ptr::read_unaligned(mem_addr.cast()) } @@ -1187,7 +1236,8 @@ pub unsafe fn _mm256_loadu_ph(mem_addr: *const f16) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub unsafe fn _mm512_loadu_ph(mem_addr: *const f16) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_loadu_ph(mem_addr: *const f16) -> __m512h { ptr::read_unaligned(mem_addr.cast()) } @@ -1199,7 +1249,8 @@ pub unsafe fn _mm512_loadu_ph(mem_addr: *const f16) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] 
#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_move_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_move_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { let mut mov: f16 = simd_extract!(src, 0); if (k & 1) != 0 { @@ -1217,7 +1268,8 @@ pub fn _mm_mask_move_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __ #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_move_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_move_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { let mut mov: f16 = 0.; if (k & 1) != 0 { @@ -1234,7 +1286,8 @@ pub fn _mm_maskz_move_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_move_sh(a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_move_sh(a: __m128h, b: __m128h) -> __m128h { unsafe { let mov: f16 = simd_extract!(b, 0); simd_insert!(a, 0, mov) @@ -1248,7 +1301,8 @@ pub fn _mm_move_sh(a: __m128h, b: __m128h) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub unsafe fn _mm_store_ph(mem_addr: *mut f16, a: __m128h) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_store_ph(mem_addr: *mut f16, a: __m128h) { *mem_addr.cast() = a; } @@ -1259,7 +1313,8 @@ pub unsafe fn _mm_store_ph(mem_addr: *mut f16, a: __m128h) { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub unsafe fn _mm256_store_ph(mem_addr: *mut f16, a: __m256h) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_store_ph(mem_addr: *mut f16, a: __m256h) { *mem_addr.cast() = a; } @@ -1270,7 +1325,8 @@ pub unsafe fn _mm256_store_ph(mem_addr: *mut f16, a: __m256h) { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub unsafe fn _mm512_store_ph(mem_addr: *mut f16, a: __m512h) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_store_ph(mem_addr: *mut f16, a: __m512h) { *mem_addr.cast() = a; } @@ -1280,7 +1336,8 @@ pub unsafe fn _mm512_store_ph(mem_addr: *mut f16, a: __m512h) { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub unsafe fn _mm_store_sh(mem_addr: *mut f16, a: __m128h) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_store_sh(mem_addr: *mut f16, a: __m128h) { *mem_addr = simd_extract!(a, 0); } @@ -1307,7 +1364,8 @@ pub unsafe fn _mm_mask_store_sh(mem_addr: *mut f16, k: __mmask8, a: __m128h) { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub unsafe fn _mm_storeu_ph(mem_addr: *mut f16, a: __m128h) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_storeu_ph(mem_addr: *mut f16, a: 
__m128h) { ptr::write_unaligned(mem_addr.cast(), a); } @@ -1318,7 +1376,8 @@ pub unsafe fn _mm_storeu_ph(mem_addr: *mut f16, a: __m128h) { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub unsafe fn _mm256_storeu_ph(mem_addr: *mut f16, a: __m256h) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm256_storeu_ph(mem_addr: *mut f16, a: __m256h) { ptr::write_unaligned(mem_addr.cast(), a); } @@ -1329,7 +1388,8 @@ pub unsafe fn _mm256_storeu_ph(mem_addr: *mut f16, a: __m256h) { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub unsafe fn _mm512_storeu_ph(mem_addr: *mut f16, a: __m512h) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm512_storeu_ph(mem_addr: *mut f16, a: __m512h) { ptr::write_unaligned(mem_addr.cast(), a); } @@ -1340,7 +1400,8 @@ pub unsafe fn _mm512_storeu_ph(mem_addr: *mut f16, a: __m512h) { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vaddph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_add_ph(a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_add_ph(a: __m128h, b: __m128h) -> __m128h { unsafe { simd_add(a, b) } } @@ -1352,7 +1413,8 @@ pub fn _mm_add_ph(a: __m128h, b: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vaddph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_add_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_add_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { let r = _mm_add_ph(a, b); simd_select_bitmask(k, r, src) @@ -1367,7 +1429,8 @@ pub fn _mm_mask_add_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vaddph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_add_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_add_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { let r = _mm_add_ph(a, b); simd_select_bitmask(k, r, _mm_setzero_ph()) @@ -1381,7 +1444,8 @@ pub fn _mm_maskz_add_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vaddph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_add_ph(a: __m256h, b: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_add_ph(a: __m256h, b: __m256h) -> __m256h { unsafe { simd_add(a, b) } } @@ -1393,7 +1457,8 @@ pub fn _mm256_add_ph(a: __m256h, b: __m256h) -> __m256h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vaddph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mask_add_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_add_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h { unsafe { 
let r = _mm256_add_ph(a, b); simd_select_bitmask(k, r, src) @@ -1408,7 +1473,8 @@ pub fn _mm256_mask_add_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vaddph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_maskz_add_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_add_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { unsafe { let r = _mm256_add_ph(a, b); simd_select_bitmask(k, r, _mm256_setzero_ph()) @@ -1422,7 +1488,8 @@ pub fn _mm256_maskz_add_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vaddph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_add_ph(a: __m512h, b: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_add_ph(a: __m512h, b: __m512h) -> __m512h { unsafe { simd_add(a, b) } } @@ -1434,7 +1501,8 @@ pub fn _mm512_add_ph(a: __m512h, b: __m512h) -> __m512h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vaddph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_mask_add_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_add_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h { unsafe { let r = _mm512_add_ph(a, b); simd_select_bitmask(k, r, src) @@ -1449,7 +1517,8 @@ pub fn _mm512_mask_add_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vaddph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_maskz_add_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_add_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h { unsafe { let r = _mm512_add_ph(a, b); simd_select_bitmask(k, r, _mm512_setzero_ph()) @@ -1614,7 +1683,8 @@ pub fn _mm_maskz_add_round_sh(k: __mmask8, a: __m128h, b: _ #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vaddsh))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_add_sh(a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_add_sh(a: __m128h, b: __m128h) -> __m128h { unsafe { simd_insert!(a, 0, _mm_cvtsh_h(a) + _mm_cvtsh_h(b)) } } @@ -1627,7 +1697,8 @@ pub fn _mm_add_sh(a: __m128h, b: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vaddsh))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_add_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_add_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { let extractsrc: f16 = simd_extract!(src, 0); let mut add: f16 = extractsrc; @@ -1649,7 +1720,8 @@ pub fn _mm_mask_add_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vaddsh))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub 
fn _mm_maskz_add_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_add_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { let mut add: f16 = 0.; if (k & 0b00000001) != 0 { @@ -1668,7 +1740,8 @@ pub fn _mm_maskz_add_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vsubph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_sub_ph(a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_sub_ph(a: __m128h, b: __m128h) -> __m128h { unsafe { simd_sub(a, b) } } @@ -1680,7 +1753,8 @@ pub fn _mm_sub_ph(a: __m128h, b: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vsubph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_sub_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_sub_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { let r = _mm_sub_ph(a, b); simd_select_bitmask(k, r, src) @@ -1695,7 +1769,8 @@ pub fn _mm_mask_sub_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vsubph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_sub_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_sub_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { let r = _mm_sub_ph(a, b); simd_select_bitmask(k, r, _mm_setzero_ph()) @@ -1709,7 +1784,8 @@ pub fn _mm_maskz_sub_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vsubph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_sub_ph(a: __m256h, b: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_sub_ph(a: __m256h, b: __m256h) -> __m256h { unsafe { simd_sub(a, b) } } @@ -1721,7 +1797,8 @@ pub fn _mm256_sub_ph(a: __m256h, b: __m256h) -> __m256h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vsubph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mask_sub_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_sub_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h { unsafe { let r = _mm256_sub_ph(a, b); simd_select_bitmask(k, r, src) @@ -1736,7 +1813,8 @@ pub fn _mm256_mask_sub_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vsubph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_maskz_sub_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_sub_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { unsafe { let r = _mm256_sub_ph(a, b); simd_select_bitmask(k, r, _mm256_setzero_ph()) @@ -1750,7 +1828,8 @@ pub fn 
_mm256_maskz_sub_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vsubph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_sub_ph(a: __m512h, b: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_sub_ph(a: __m512h, b: __m512h) -> __m512h { unsafe { simd_sub(a, b) } } @@ -1762,7 +1841,8 @@ pub fn _mm512_sub_ph(a: __m512h, b: __m512h) -> __m512h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vsubph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_mask_sub_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_sub_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h { unsafe { let r = _mm512_sub_ph(a, b); simd_select_bitmask(k, r, src) @@ -1777,7 +1857,8 @@ pub fn _mm512_mask_sub_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vsubph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_maskz_sub_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_sub_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h { unsafe { let r = _mm512_sub_ph(a, b); simd_select_bitmask(k, r, _mm512_setzero_ph()) @@ -1943,7 +2024,8 @@ pub fn _mm_maskz_sub_round_sh(k: __mmask8, a: __m128h, b: _ #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vsubsh))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_sub_sh(a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_sub_sh(a: __m128h, b: __m128h) -> __m128h { unsafe { simd_insert!(a, 0, _mm_cvtsh_h(a) - _mm_cvtsh_h(b)) } } @@ -1956,7 +2038,8 @@ pub fn _mm_sub_sh(a: __m128h, b: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vsubsh))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_sub_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_sub_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { let extractsrc: f16 = simd_extract!(src, 0); let mut add: f16 = extractsrc; @@ -1978,7 +2061,8 @@ pub fn _mm_mask_sub_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vsubsh))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_sub_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_sub_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { let mut add: f16 = 0.; if (k & 0b00000001) != 0 { @@ -1997,7 +2081,8 @@ pub fn _mm_maskz_sub_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vmulph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mul_ph(a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub 
const fn _mm_mul_ph(a: __m128h, b: __m128h) -> __m128h { unsafe { simd_mul(a, b) } } @@ -2009,7 +2094,8 @@ pub fn _mm_mul_ph(a: __m128h, b: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vmulph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_mul_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mul_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { let r = _mm_mul_ph(a, b); simd_select_bitmask(k, r, src) @@ -2024,7 +2110,8 @@ pub fn _mm_mask_mul_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vmulph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_mul_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_mul_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { let r = _mm_mul_ph(a, b); simd_select_bitmask(k, r, _mm_setzero_ph()) @@ -2038,7 +2125,8 @@ pub fn _mm_maskz_mul_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vmulph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mul_ph(a: __m256h, b: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mul_ph(a: __m256h, b: __m256h) -> __m256h { unsafe { simd_mul(a, b) } } @@ -2050,7 +2138,8 @@ pub fn _mm256_mul_ph(a: __m256h, b: __m256h) -> __m256h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vmulph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mask_mul_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_mul_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h { unsafe { let r = _mm256_mul_ph(a, b); simd_select_bitmask(k, r, src) @@ -2065,7 +2154,8 @@ pub fn _mm256_mask_mul_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vmulph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_maskz_mul_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_mul_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { unsafe { let r = _mm256_mul_ph(a, b); simd_select_bitmask(k, r, _mm256_setzero_ph()) @@ -2079,7 +2169,8 @@ pub fn _mm256_maskz_mul_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vmulph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_mul_ph(a: __m512h, b: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mul_ph(a: __m512h, b: __m512h) -> __m512h { unsafe { simd_mul(a, b) } } @@ -2091,7 +2182,8 @@ pub fn _mm512_mul_ph(a: __m512h, b: __m512h) -> __m512h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vmulph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn 
_mm512_mask_mul_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_mul_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h { unsafe { let r = _mm512_mul_ph(a, b); simd_select_bitmask(k, r, src) @@ -2106,7 +2198,8 @@ pub fn _mm512_mask_mul_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vmulph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_maskz_mul_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_mul_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h { unsafe { let r = _mm512_mul_ph(a, b); simd_select_bitmask(k, r, _mm512_setzero_ph()) @@ -2272,7 +2365,8 @@ pub fn _mm_maskz_mul_round_sh(k: __mmask8, a: __m128h, b: _ #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vmulsh))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mul_sh(a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mul_sh(a: __m128h, b: __m128h) -> __m128h { unsafe { simd_insert!(a, 0, _mm_cvtsh_h(a) * _mm_cvtsh_h(b)) } } @@ -2285,7 +2379,8 @@ pub fn _mm_mul_sh(a: __m128h, b: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vmulsh))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_mul_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_mul_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { let extractsrc: f16 = simd_extract!(src, 0); let mut add: f16 = extractsrc; @@ -2307,7 +2402,8 @@ pub fn _mm_mask_mul_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vmulsh))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_mul_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_mul_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { let mut add: f16 = 0.; if (k & 0b00000001) != 0 { @@ -2326,7 +2422,8 @@ pub fn _mm_maskz_mul_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vdivph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_div_ph(a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_div_ph(a: __m128h, b: __m128h) -> __m128h { unsafe { simd_div(a, b) } } @@ -2338,7 +2435,8 @@ pub fn _mm_div_ph(a: __m128h, b: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vdivph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_div_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_div_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { let r = _mm_div_ph(a, b); simd_select_bitmask(k, r, src) @@ -2353,7 +2451,8 @@ pub fn 
_mm_mask_div_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vdivph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_div_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_div_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { let r = _mm_div_ph(a, b); simd_select_bitmask(k, r, _mm_setzero_ph()) @@ -2367,7 +2466,8 @@ pub fn _mm_maskz_div_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vdivph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_div_ph(a: __m256h, b: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_div_ph(a: __m256h, b: __m256h) -> __m256h { unsafe { simd_div(a, b) } } @@ -2379,7 +2479,8 @@ pub fn _mm256_div_ph(a: __m256h, b: __m256h) -> __m256h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vdivph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mask_div_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_div_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h { unsafe { let r = _mm256_div_ph(a, b); simd_select_bitmask(k, r, src) @@ -2394,7 +2495,8 @@ pub fn _mm256_mask_div_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vdivph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_maskz_div_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_div_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { unsafe { let r = _mm256_div_ph(a, b); simd_select_bitmask(k, r, _mm256_setzero_ph()) @@ -2408,7 +2510,8 @@ pub fn _mm256_maskz_div_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vdivph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_div_ph(a: __m512h, b: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_div_ph(a: __m512h, b: __m512h) -> __m512h { unsafe { simd_div(a, b) } } @@ -2420,7 +2523,8 @@ pub fn _mm512_div_ph(a: __m512h, b: __m512h) -> __m512h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vdivph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_mask_div_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_div_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h { unsafe { let r = _mm512_div_ph(a, b); simd_select_bitmask(k, r, src) @@ -2435,7 +2539,8 @@ pub fn _mm512_mask_div_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vdivph))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_maskz_div_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_div_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h { unsafe { let r = _mm512_div_ph(a, b); simd_select_bitmask(k, r, _mm512_setzero_ph()) @@ -2601,7 +2706,8 @@ pub fn _mm_maskz_div_round_sh(k: __mmask8, a: __m128h, b: _ #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vdivsh))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_div_sh(a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_div_sh(a: __m128h, b: __m128h) -> __m128h { unsafe { simd_insert!(a, 0, _mm_cvtsh_h(a) / _mm_cvtsh_h(b)) } } @@ -2614,7 +2720,8 @@ pub fn _mm_div_sh(a: __m128h, b: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vdivsh))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_div_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_div_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { let extractsrc: f16 = simd_extract!(src, 0); let mut add: f16 = extractsrc; @@ -2636,7 +2743,8 @@ pub fn _mm_mask_div_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vdivsh))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_div_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_div_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { let mut add: f16 = 0.; if (k & 0b00000001) != 0 { @@ -3967,7 +4075,8 @@ pub fn _mm_maskz_fcmul_round_sch( #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_abs_ph(v2: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_abs_ph(v2: __m128h) -> __m128h { unsafe { transmute(_mm_and_si128(transmute(v2), _mm_set1_epi16(i16::MAX))) } } @@ -3978,7 +4087,8 @@ pub fn _mm_abs_ph(v2: __m128h) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_abs_ph(v2: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_abs_ph(v2: __m256h) -> __m256h { unsafe { transmute(_mm256_and_si256(transmute(v2), _mm256_set1_epi16(i16::MAX))) } } @@ -3989,7 +4099,8 @@ pub fn _mm256_abs_ph(v2: __m256h) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_abs_ph(v2: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_abs_ph(v2: __m512h) -> __m512h { unsafe { transmute(_mm512_and_si512(transmute(v2), _mm512_set1_epi16(i16::MAX))) } } @@ -4002,7 +4113,8 @@ pub fn _mm512_abs_ph(v2: __m512h) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_conj_pch(a: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_conj_pch(a: __m128h) -> __m128h { unsafe { 
transmute(_mm_xor_si128(transmute(a), _mm_set1_epi32(i32::MIN))) } } @@ -4015,7 +4127,8 @@ pub fn _mm_conj_pch(a: __m128h) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_conj_pch(src: __m128h, k: __mmask8, a: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_conj_pch(src: __m128h, k: __mmask8, a: __m128h) -> __m128h { unsafe { let r: __m128 = transmute(_mm_conj_pch(a)); transmute(simd_select_bitmask(k, r, transmute(src))) @@ -4031,7 +4144,8 @@ pub fn _mm_mask_conj_pch(src: __m128h, k: __mmask8, a: __m128h) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_conj_pch(k: __mmask8, a: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_conj_pch(k: __mmask8, a: __m128h) -> __m128h { _mm_mask_conj_pch(_mm_setzero_ph(), k, a) } @@ -4043,7 +4157,8 @@ pub fn _mm_maskz_conj_pch(k: __mmask8, a: __m128h) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_conj_pch(a: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_conj_pch(a: __m256h) -> __m256h { unsafe { transmute(_mm256_xor_si256(transmute(a), _mm256_set1_epi32(i32::MIN))) } } @@ -4056,7 +4171,8 @@ pub fn _mm256_conj_pch(a: __m256h) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mask_conj_pch(src: __m256h, k: __mmask8, a: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_conj_pch(src: __m256h, k: __mmask8, a: __m256h) -> __m256h { unsafe { let r: __m256 = transmute(_mm256_conj_pch(a)); transmute(simd_select_bitmask(k, r, transmute(src))) @@ -4072,7 +4188,8 @@ pub fn _mm256_mask_conj_pch(src: __m256h, k: __mmask8, a: __m256h) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_maskz_conj_pch(k: __mmask8, a: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_conj_pch(k: __mmask8, a: __m256h) -> __m256h { _mm256_mask_conj_pch(_mm256_setzero_ph(), k, a) } @@ -4084,7 +4201,8 @@ pub fn _mm256_maskz_conj_pch(k: __mmask8, a: __m256h) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_conj_pch(a: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_conj_pch(a: __m512h) -> __m512h { unsafe { transmute(_mm512_xor_si512(transmute(a), _mm512_set1_epi32(i32::MIN))) } } @@ -4097,7 +4215,8 @@ pub fn _mm512_conj_pch(a: __m512h) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_mask_conj_pch(src: __m512h, k: __mmask16, a: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_conj_pch(src: __m512h, k: __mmask16, a: __m512h) -> __m512h { unsafe { let r: __m512 = transmute(_mm512_conj_pch(a)); 
transmute(simd_select_bitmask(k, r, transmute(src))) @@ -4113,7 +4232,8 @@ pub fn _mm512_mask_conj_pch(src: __m512h, k: __mmask16, a: __m512h) -> __m512h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_maskz_conj_pch(k: __mmask16, a: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_conj_pch(k: __mmask16, a: __m512h) -> __m512h { _mm512_mask_conj_pch(_mm512_setzero_ph(), k, a) } @@ -5198,7 +5318,8 @@ pub fn _mm_maskz_fcmadd_round_sch( #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_fmadd_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fmadd_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { simd_fma(a, b, c) } } @@ -5211,7 +5332,8 @@ pub fn _mm_fmadd_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_fmadd_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fmadd_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { unsafe { simd_select_bitmask(k, _mm_fmadd_ph(a, b, c), a) } } @@ -5224,7 +5346,8 @@ pub fn _mm_mask_fmadd_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask3_fmadd_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fmadd_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { unsafe { simd_select_bitmask(k, _mm_fmadd_ph(a, b, c), c) } } @@ -5237,7 +5360,8 @@ pub fn _mm_mask3_fmadd_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __ #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_fmadd_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fmadd_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { simd_select_bitmask(k, _mm_fmadd_ph(a, b, c), _mm_setzero_ph()) } } @@ -5249,7 +5373,8 @@ pub fn _mm_maskz_fmadd_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __ #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_fmadd_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_fmadd_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { unsafe { simd_fma(a, b, c) } } @@ -5262,7 +5387,8 @@ pub fn _mm256_fmadd_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mask_fmadd_ph(a: __m256h, k: 
__mmask16, b: __m256h, c: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_fmadd_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h { unsafe { simd_select_bitmask(k, _mm256_fmadd_ph(a, b, c), a) } } @@ -5275,7 +5401,8 @@ pub fn _mm256_mask_fmadd_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mask3_fmadd_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask3_fmadd_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h { unsafe { simd_select_bitmask(k, _mm256_fmadd_ph(a, b, c), c) } } @@ -5288,7 +5415,8 @@ pub fn _mm256_mask3_fmadd_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) - #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_maskz_fmadd_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_fmadd_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h { unsafe { simd_select_bitmask(k, _mm256_fmadd_ph(a, b, c), _mm256_setzero_ph()) } } @@ -5300,7 +5428,8 @@ pub fn _mm256_maskz_fmadd_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) - #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_fmadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_fmadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { unsafe { simd_fma(a, b, c) } } @@ -5313,7 +5442,8 @@ pub fn _mm512_fmadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_mask_fmadd_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_fmadd_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h { unsafe { simd_select_bitmask(k, _mm512_fmadd_ph(a, b, c), a) } } @@ -5326,7 +5456,8 @@ pub fn _mm512_mask_fmadd_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_mask3_fmadd_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask3_fmadd_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h { unsafe { simd_select_bitmask(k, _mm512_fmadd_ph(a, b, c), c) } } @@ -5339,7 +5470,8 @@ pub fn _mm512_mask3_fmadd_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) - #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_maskz_fmadd_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = 
"149298")] +pub const fn _mm512_maskz_fmadd_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h { unsafe { simd_select_bitmask(k, _mm512_fmadd_ph(a, b, c), _mm512_setzero_ph()) } } @@ -5470,7 +5602,8 @@ pub fn _mm512_maskz_fmadd_round_ph( #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_fmadd_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fmadd_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { let extracta: f16 = simd_extract!(a, 0); let extractb: f16 = simd_extract!(b, 0); @@ -5490,7 +5623,8 @@ pub fn _mm_fmadd_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_fmadd_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fmadd_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { unsafe { let mut fmadd: f16 = simd_extract!(a, 0); if k & 1 != 0 { @@ -5512,7 +5646,8 @@ pub fn _mm_mask_fmadd_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask3_fmadd_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fmadd_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { unsafe { let mut fmadd: f16 = simd_extract!(c, 0); if k & 1 != 0 { @@ -5534,7 +5669,8 @@ pub fn _mm_mask3_fmadd_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __ #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_fmadd_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fmadd_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { let mut fmadd: f16 = 0.0; if k & 1 != 0 { @@ -5697,7 +5833,8 @@ pub fn _mm_maskz_fmadd_round_sh( #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_fmsub_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fmsub_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { simd_fma(a, b, simd_neg(c)) } } @@ -5710,7 +5847,8 @@ pub fn _mm_fmsub_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_fmsub_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fmsub_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { unsafe { simd_select_bitmask(k, _mm_fmsub_ph(a, b, c), a) } } @@ -5723,7 +5861,8 @@ pub fn _mm_mask_fmsub_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m 
#[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask3_fmsub_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fmsub_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { unsafe { simd_select_bitmask(k, _mm_fmsub_ph(a, b, c), c) } } @@ -5736,7 +5875,8 @@ pub fn _mm_mask3_fmsub_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __ #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_fmsub_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fmsub_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { simd_select_bitmask(k, _mm_fmsub_ph(a, b, c), _mm_setzero_ph()) } } @@ -5748,7 +5888,8 @@ pub fn _mm_maskz_fmsub_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __ #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_fmsub_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_fmsub_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { unsafe { simd_fma(a, b, simd_neg(c)) } } @@ -5761,7 +5902,8 @@ pub fn _mm256_fmsub_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mask_fmsub_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_fmsub_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h { unsafe { simd_select_bitmask(k, _mm256_fmsub_ph(a, b, c), a) } } @@ -5774,7 +5916,8 @@ pub fn _mm256_mask_fmsub_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mask3_fmsub_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask3_fmsub_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h { unsafe { simd_select_bitmask(k, _mm256_fmsub_ph(a, b, c), c) } } @@ -5787,7 +5930,8 @@ pub fn _mm256_mask3_fmsub_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) - #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_maskz_fmsub_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_fmsub_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h { unsafe { simd_select_bitmask(k, _mm256_fmsub_ph(a, b, c), _mm256_setzero_ph()) } } @@ -5799,7 +5943,8 @@ pub fn _mm256_maskz_fmsub_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) - #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, 
assert_instr(vfmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_fmsub_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_fmsub_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { unsafe { simd_fma(a, b, simd_neg(c)) } } @@ -5812,7 +5957,8 @@ pub fn _mm512_fmsub_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_mask_fmsub_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_fmsub_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h { unsafe { simd_select_bitmask(k, _mm512_fmsub_ph(a, b, c), a) } } @@ -5825,7 +5971,8 @@ pub fn _mm512_mask_fmsub_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_mask3_fmsub_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask3_fmsub_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h { unsafe { simd_select_bitmask(k, _mm512_fmsub_ph(a, b, c), c) } } @@ -5838,7 +5985,8 @@ pub fn _mm512_mask3_fmsub_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) - #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_maskz_fmsub_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_fmsub_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h { unsafe { simd_select_bitmask(k, _mm512_fmsub_ph(a, b, c), _mm512_setzero_ph()) } } @@ -5969,7 +6117,8 @@ pub fn _mm512_maskz_fmsub_round_ph( #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_fmsub_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fmsub_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { let extracta: f16 = simd_extract!(a, 0); let extractb: f16 = simd_extract!(b, 0); @@ -5989,7 +6138,8 @@ pub fn _mm_fmsub_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_fmsub_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fmsub_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { unsafe { let mut fmsub: f16 = simd_extract!(a, 0); if k & 1 != 0 { @@ -6011,7 +6161,8 @@ pub fn _mm_mask_fmsub_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask3_fmsub_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { +#[rustc_const_unstable(feature 
= "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fmsub_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { unsafe { let mut fmsub: f16 = simd_extract!(c, 0); if k & 1 != 0 { @@ -6033,7 +6184,8 @@ pub fn _mm_mask3_fmsub_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __ #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_fmsub_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fmsub_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { let mut fmsub: f16 = 0.0; if k & 1 != 0 { @@ -6187,7 +6339,8 @@ pub fn _mm_maskz_fmsub_round_sh( #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfnmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_fnmadd_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fnmadd_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { simd_fma(simd_neg(a), b, c) } } @@ -6200,7 +6353,8 @@ pub fn _mm_fnmadd_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfnmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_fnmadd_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fnmadd_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { unsafe { simd_select_bitmask(k, _mm_fnmadd_ph(a, b, c), a) } } @@ -6213,7 +6367,8 @@ pub fn _mm_mask_fnmadd_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __ #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfnmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask3_fnmadd_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fnmadd_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { unsafe { simd_select_bitmask(k, _mm_fnmadd_ph(a, b, c), c) } } @@ -6226,7 +6381,8 @@ pub fn _mm_mask3_fnmadd_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> _ #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfnmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_fnmadd_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fnmadd_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { simd_select_bitmask(k, _mm_fnmadd_ph(a, b, c), _mm_setzero_ph()) } } @@ -6238,7 +6394,8 @@ pub fn _mm_maskz_fnmadd_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> _ #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfnmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_fnmadd_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_fnmadd_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { unsafe { simd_fma(simd_neg(a), b, c) } } @@ -6251,7 +6408,8 @@ pub fn 
_mm256_fnmadd_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfnmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mask_fnmadd_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_fnmadd_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h { unsafe { simd_select_bitmask(k, _mm256_fnmadd_ph(a, b, c), a) } } @@ -6264,7 +6422,8 @@ pub fn _mm256_mask_fnmadd_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) - #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfnmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mask3_fnmadd_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask3_fnmadd_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h { unsafe { simd_select_bitmask(k, _mm256_fnmadd_ph(a, b, c), c) } } @@ -6277,7 +6436,8 @@ pub fn _mm256_mask3_fnmadd_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfnmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_maskz_fnmadd_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_fnmadd_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h { unsafe { simd_select_bitmask(k, _mm256_fnmadd_ph(a, b, c), _mm256_setzero_ph()) } } @@ -6289,7 +6449,8 @@ pub fn _mm256_maskz_fnmadd_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfnmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_fnmadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_fnmadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { unsafe { simd_fma(simd_neg(a), b, c) } } @@ -6302,7 +6463,8 @@ pub fn _mm512_fnmadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfnmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_mask_fnmadd_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_fnmadd_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h { unsafe { simd_select_bitmask(k, _mm512_fnmadd_ph(a, b, c), a) } } @@ -6315,7 +6477,8 @@ pub fn _mm512_mask_fnmadd_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) - #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfnmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_mask3_fnmadd_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask3_fnmadd_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h { unsafe { simd_select_bitmask(k, _mm512_fnmadd_ph(a, b, c), c) } } @@ -6328,7 +6491,8 @@ pub fn _mm512_mask3_fnmadd_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) 
#[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfnmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_maskz_fnmadd_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_fnmadd_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h { unsafe { simd_select_bitmask(k, _mm512_fnmadd_ph(a, b, c), _mm512_setzero_ph()) } } @@ -6459,7 +6623,8 @@ pub fn _mm512_maskz_fnmadd_round_ph( #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfnmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_fnmadd_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fnmadd_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { let extracta: f16 = simd_extract!(a, 0); let extractb: f16 = simd_extract!(b, 0); @@ -6479,7 +6644,8 @@ pub fn _mm_fnmadd_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfnmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_fnmadd_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fnmadd_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { unsafe { let mut fnmadd: f16 = simd_extract!(a, 0); if k & 1 != 0 { @@ -6501,7 +6667,8 @@ pub fn _mm_mask_fnmadd_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __ #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfnmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask3_fnmadd_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fnmadd_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { unsafe { let mut fnmadd: f16 = simd_extract!(c, 0); if k & 1 != 0 { @@ -6523,7 +6690,8 @@ pub fn _mm_mask3_fnmadd_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> _ #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfnmadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_fnmadd_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fnmadd_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { let mut fnmadd: f16 = 0.0; if k & 1 != 0 { @@ -6685,7 +6853,8 @@ pub fn _mm_maskz_fnmadd_round_sh( #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfnmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_fnmsub_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fnmsub_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { simd_fma(simd_neg(a), b, simd_neg(c)) } } @@ -6698,7 +6867,8 @@ pub fn _mm_fnmsub_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfnmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_fnmsub_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fnmsub_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { unsafe { simd_select_bitmask(k, _mm_fnmsub_ph(a, b, c), a) } } @@ -6711,7 +6881,8 @@ pub fn _mm_mask_fnmsub_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __ #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfnmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask3_fnmsub_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fnmsub_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { unsafe { simd_select_bitmask(k, _mm_fnmsub_ph(a, b, c), c) } } @@ -6724,7 +6895,8 @@ pub fn _mm_mask3_fnmsub_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> _ #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfnmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_fnmsub_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fnmsub_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { simd_select_bitmask(k, _mm_fnmsub_ph(a, b, c), _mm_setzero_ph()) } } @@ -6736,7 +6908,8 @@ pub fn _mm_maskz_fnmsub_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> _ #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfnmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_fnmsub_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_fnmsub_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { unsafe { simd_fma(simd_neg(a), b, simd_neg(c)) } } @@ -6749,7 +6922,8 @@ pub fn _mm256_fnmsub_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfnmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mask_fnmsub_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_fnmsub_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h { unsafe { simd_select_bitmask(k, _mm256_fnmsub_ph(a, b, c), a) } } @@ -6762,7 +6936,8 @@ pub fn _mm256_mask_fnmsub_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) - #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfnmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mask3_fnmsub_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask3_fnmsub_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h { unsafe { simd_select_bitmask(k, _mm256_fnmsub_ph(a, b, c), c) } } @@ -6775,7 +6950,8 @@ pub fn _mm256_mask3_fnmsub_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfnmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_maskz_fnmsub_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = 
"149298")] +pub const fn _mm256_maskz_fnmsub_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h { unsafe { simd_select_bitmask(k, _mm256_fnmsub_ph(a, b, c), _mm256_setzero_ph()) } } @@ -6787,7 +6963,8 @@ pub fn _mm256_maskz_fnmsub_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfnmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_fnmsub_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_fnmsub_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { unsafe { simd_fma(simd_neg(a), b, simd_neg(c)) } } @@ -6800,7 +6977,8 @@ pub fn _mm512_fnmsub_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfnmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_mask_fnmsub_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_fnmsub_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h { unsafe { simd_select_bitmask(k, _mm512_fnmsub_ph(a, b, c), a) } } @@ -6813,7 +6991,8 @@ pub fn _mm512_mask_fnmsub_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) - #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfnmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_mask3_fnmsub_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask3_fnmsub_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h { unsafe { simd_select_bitmask(k, _mm512_fnmsub_ph(a, b, c), c) } } @@ -6826,7 +7005,8 @@ pub fn _mm512_mask3_fnmsub_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfnmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_maskz_fnmsub_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_fnmsub_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h { unsafe { simd_select_bitmask(k, _mm512_fnmsub_ph(a, b, c), _mm512_setzero_ph()) } } @@ -6957,7 +7137,8 @@ pub fn _mm512_maskz_fnmsub_round_ph( #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfnmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_fnmsub_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fnmsub_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { let extracta: f16 = simd_extract!(a, 0); let extractb: f16 = simd_extract!(b, 0); @@ -6977,7 +7158,8 @@ pub fn _mm_fnmsub_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfnmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_fnmsub_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fnmsub_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { unsafe { let mut fnmsub: f16 = simd_extract!(a, 
0); if k & 1 != 0 { @@ -6999,7 +7181,8 @@ pub fn _mm_mask_fnmsub_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __ #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfnmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask3_fnmsub_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fnmsub_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { unsafe { let mut fnmsub: f16 = simd_extract!(c, 0); if k & 1 != 0 { @@ -7021,7 +7204,8 @@ pub fn _mm_mask3_fnmsub_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> _ #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfnmsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_fnmsub_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fnmsub_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { let mut fnmsub: f16 = 0.0; if k & 1 != 0 { @@ -7183,7 +7367,8 @@ pub fn _mm_maskz_fnmsub_round_sh( #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmaddsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_fmaddsub_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fmaddsub_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { let add = simd_fma(a, b, c); let sub = simd_fma(a, b, simd_neg(c)); @@ -7200,7 +7385,8 @@ pub fn _mm_fmaddsub_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmaddsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_fmaddsub_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fmaddsub_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { unsafe { simd_select_bitmask(k, _mm_fmaddsub_ph(a, b, c), a) } } @@ -7213,7 +7399,8 @@ pub fn _mm_mask_fmaddsub_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmaddsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask3_fmaddsub_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fmaddsub_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { unsafe { simd_select_bitmask(k, _mm_fmaddsub_ph(a, b, c), c) } } @@ -7226,7 +7413,8 @@ pub fn _mm_mask3_fmaddsub_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmaddsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_fmaddsub_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fmaddsub_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { simd_select_bitmask(k, _mm_fmaddsub_ph(a, b, c), _mm_setzero_ph()) } } @@ -7238,7 +7426,8 @@ pub fn _mm_maskz_fmaddsub_ph(k: __mmask8, a: __m128h, b: __m128h, c: 
__m128h) -> #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmaddsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_fmaddsub_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_fmaddsub_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { unsafe { let add = simd_fma(a, b, c); let sub = simd_fma(a, b, simd_neg(c)); @@ -7259,7 +7448,8 @@ pub fn _mm256_fmaddsub_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmaddsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mask_fmaddsub_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_fmaddsub_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h { unsafe { simd_select_bitmask(k, _mm256_fmaddsub_ph(a, b, c), a) } } @@ -7272,7 +7462,8 @@ pub fn _mm256_mask_fmaddsub_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmaddsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mask3_fmaddsub_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask3_fmaddsub_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h { unsafe { simd_select_bitmask(k, _mm256_fmaddsub_ph(a, b, c), c) } } @@ -7285,7 +7476,8 @@ pub fn _mm256_mask3_fmaddsub_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16 #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmaddsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_maskz_fmaddsub_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_fmaddsub_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h { unsafe { simd_select_bitmask(k, _mm256_fmaddsub_ph(a, b, c), _mm256_setzero_ph()) } } @@ -7297,7 +7489,8 @@ pub fn _mm256_maskz_fmaddsub_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmaddsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_fmaddsub_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_fmaddsub_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { unsafe { let add = simd_fma(a, b, c); let sub = simd_fma(a, b, simd_neg(c)); @@ -7321,7 +7514,8 @@ pub fn _mm512_fmaddsub_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmaddsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_mask_fmaddsub_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_fmaddsub_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h { unsafe { simd_select_bitmask(k, _mm512_fmaddsub_ph(a, b, c), a) } } @@ -7334,7 +7528,8 @@ pub fn _mm512_mask_fmaddsub_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) 
#[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmaddsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_mask3_fmaddsub_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask3_fmaddsub_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h { unsafe { simd_select_bitmask(k, _mm512_fmaddsub_ph(a, b, c), c) } } @@ -7347,7 +7542,8 @@ pub fn _mm512_mask3_fmaddsub_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32 #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmaddsub))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_maskz_fmaddsub_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_fmaddsub_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h { unsafe { simd_select_bitmask(k, _mm512_fmaddsub_ph(a, b, c), _mm512_setzero_ph()) } } @@ -7481,7 +7677,8 @@ pub fn _mm512_maskz_fmaddsub_round_ph( #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmsubadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_fmsubadd_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fmsubadd_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h { _mm_fmaddsub_ph(a, b, unsafe { simd_neg(c) }) } @@ -7494,7 +7691,8 @@ pub fn _mm_fmsubadd_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmsubadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_fmsubadd_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_fmsubadd_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { unsafe { simd_select_bitmask(k, _mm_fmsubadd_ph(a, b, c), a) } } @@ -7507,7 +7705,8 @@ pub fn _mm_mask_fmsubadd_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmsubadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask3_fmsubadd_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask3_fmsubadd_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { unsafe { simd_select_bitmask(k, _mm_fmsubadd_ph(a, b, c), c) } } @@ -7520,7 +7719,8 @@ pub fn _mm_mask3_fmsubadd_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmsubadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_maskz_fmsubadd_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_fmsubadd_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { unsafe { simd_select_bitmask(k, _mm_fmsubadd_ph(a, b, c), _mm_setzero_ph()) } } @@ -7532,7 +7732,8 @@ pub fn _mm_maskz_fmsubadd_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, 
assert_instr(vfmsubadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_fmsubadd_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_fmsubadd_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { _mm256_fmaddsub_ph(a, b, unsafe { simd_neg(c) }) } @@ -7545,7 +7746,8 @@ pub fn _mm256_fmsubadd_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmsubadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mask_fmsubadd_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_fmsubadd_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h { unsafe { simd_select_bitmask(k, _mm256_fmsubadd_ph(a, b, c), a) } } @@ -7558,7 +7760,8 @@ pub fn _mm256_mask_fmsubadd_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmsubadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mask3_fmsubadd_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask3_fmsubadd_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h { unsafe { simd_select_bitmask(k, _mm256_fmsubadd_ph(a, b, c), c) } } @@ -7571,7 +7774,8 @@ pub fn _mm256_mask3_fmsubadd_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16 #[target_feature(enable = "avx512fp16,avx512vl")] #[cfg_attr(test, assert_instr(vfmsubadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_maskz_fmsubadd_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_fmsubadd_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h { unsafe { simd_select_bitmask(k, _mm256_fmsubadd_ph(a, b, c), _mm256_setzero_ph()) } } @@ -7583,7 +7787,8 @@ pub fn _mm256_maskz_fmsubadd_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmsubadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_fmsubadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_fmsubadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { _mm512_fmaddsub_ph(a, b, unsafe { simd_neg(c) }) } @@ -7596,7 +7801,8 @@ pub fn _mm512_fmsubadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmsubadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_mask_fmsubadd_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_fmsubadd_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h { unsafe { simd_select_bitmask(k, _mm512_fmsubadd_ph(a, b, c), a) } } @@ -7609,7 +7815,8 @@ pub fn _mm512_mask_fmsubadd_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmsubadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = 
"127213")] -pub fn _mm512_mask3_fmsubadd_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask3_fmsubadd_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h { unsafe { simd_select_bitmask(k, _mm512_fmsubadd_ph(a, b, c), c) } } @@ -7622,7 +7829,8 @@ pub fn _mm512_mask3_fmsubadd_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32 #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vfmsubadd))] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_maskz_fmsubadd_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_fmsubadd_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h { unsafe { simd_select_bitmask(k, _mm512_fmsubadd_ph(a, b, c), _mm512_setzero_ph()) } } @@ -11117,7 +11325,8 @@ pub fn _mm_maskz_reduce_round_sh( #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_reduce_add_ph(a: __m128h) -> f16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_reduce_add_ph(a: __m128h) -> f16 { unsafe { let b = simd_shuffle!(a, a, [4, 5, 6, 7, 0, 1, 2, 3]); let a = _mm_add_ph(a, b); @@ -11134,7 +11343,8 @@ pub fn _mm_reduce_add_ph(a: __m128h) -> f16 { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_reduce_add_ph(a: __m256h) -> f16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_reduce_add_ph(a: __m256h) -> f16 { unsafe { let p = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); let q = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -11149,7 +11359,8 @@ pub fn _mm256_reduce_add_ph(a: __m256h) -> f16 { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_reduce_add_ph(a: __m512h) -> f16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_add_ph(a: __m512h) -> f16 { unsafe { let p = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); let q = simd_shuffle!( @@ -11170,7 +11381,8 @@ pub fn _mm512_reduce_add_ph(a: __m512h) -> f16 { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_reduce_mul_ph(a: __m128h) -> f16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_reduce_mul_ph(a: __m128h) -> f16 { unsafe { let b = simd_shuffle!(a, a, [4, 5, 6, 7, 0, 1, 2, 3]); let a = _mm_mul_ph(a, b); @@ -11187,7 +11399,8 @@ pub fn _mm_reduce_mul_ph(a: __m128h) -> f16 { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_reduce_mul_ph(a: __m256h) -> f16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_reduce_mul_ph(a: __m256h) -> f16 { unsafe { let p = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); let q = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -11202,7 +11415,8 @@ pub fn _mm256_reduce_mul_ph(a: __m256h) -> f16 { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] 
-pub fn _mm512_reduce_mul_ph(a: __m512h) -> f16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_reduce_mul_ph(a: __m512h) -> f16 { unsafe { let p = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); let q = simd_shuffle!( @@ -11566,7 +11780,8 @@ pub fn _mm_mask_fpclass_sh_mask(k1: __mmask8, a: __m128h) -> __ #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_mask_blend_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_blend_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { unsafe { simd_select_bitmask(k, b, a) } } @@ -11577,7 +11792,8 @@ pub fn _mm_mask_blend_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { #[inline] #[target_feature(enable = "avx512fp16,avx512vl")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_mask_blend_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_blend_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { unsafe { simd_select_bitmask(k, b, a) } } @@ -11588,7 +11804,8 @@ pub fn _mm256_mask_blend_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_mask_blend_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_blend_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h { unsafe { simd_select_bitmask(k, b, a) } } @@ -16304,7 +16521,8 @@ pub fn _mm_maskz_cvt_roundsh_sd(k: __mmask8, a: __m128d, b: __m1 #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_cvtsh_h(a: __m128h) -> f16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtsh_h(a: __m128h) -> f16 { unsafe { simd_extract!(a, 0) } } @@ -16314,7 +16532,8 @@ pub fn _mm_cvtsh_h(a: __m128h) -> f16 { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm256_cvtsh_h(a: __m256h) -> f16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtsh_h(a: __m256h) -> f16 { unsafe { simd_extract!(a, 0) } } @@ -16324,7 +16543,8 @@ pub fn _mm256_cvtsh_h(a: __m256h) -> f16 { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm512_cvtsh_h(a: __m512h) -> f16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_cvtsh_h(a: __m512h) -> f16 { unsafe { simd_extract!(a, 0) } } @@ -16334,7 +16554,8 @@ pub fn _mm512_cvtsh_h(a: __m512h) -> f16 { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub fn _mm_cvtsi128_si16(a: __m128i) -> i16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtsi128_si16(a: __m128i) -> i16 { unsafe { simd_extract!(a.as_i16x8(), 0) } } @@ -16344,7 +16565,8 @@ pub fn _mm_cvtsi128_si16(a: __m128i) -> i16 { #[inline] #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] 
-pub fn _mm_cvtsi16_si128(a: i16) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtsi16_si128(a: i16) -> __m128i { unsafe { transmute(simd_insert!(i16x8::ZERO, 0, a)) } } @@ -16708,25 +16930,29 @@ unsafe extern "C" { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use crate::core_arch::x86::*; use crate::mem::transmute; use crate::ptr::{addr_of, addr_of_mut}; use stdarch_test::simd_test; #[target_feature(enable = "avx512fp16")] - unsafe fn _mm_set1_pch(re: f16, im: f16) -> __m128h { + #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] + const fn _mm_set1_pch(re: f16, im: f16) -> __m128h { _mm_setr_ph(re, im, re, im, re, im, re, im) } #[target_feature(enable = "avx512fp16")] - unsafe fn _mm256_set1_pch(re: f16, im: f16) -> __m256h { + #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] + const fn _mm256_set1_pch(re: f16, im: f16) -> __m256h { _mm256_setr_ph( re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, ) } #[target_feature(enable = "avx512fp16")] - unsafe fn _mm512_set1_pch(re: f16, im: f16) -> __m512h { + #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] + const fn _mm512_set1_pch(re: f16, im: f16) -> __m512h { _mm512_setr_ph( re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, @@ -16734,14 +16960,14 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_set_ph() { + const unsafe fn test_mm_set_ph() { let r = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let e = _mm_setr_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_set_ph() { + const unsafe fn test_mm256_set_ph() { let r = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); @@ -16752,7 +16978,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_set_ph() { + const unsafe fn test_mm512_set_ph() { let r = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -16767,21 +16993,21 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_set_sh() { + const unsafe fn test_mm_set_sh() { let r = _mm_set_sh(1.0); let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_set1_ph() { + const unsafe fn test_mm_set1_ph() { let r = _mm_set1_ph(1.0); let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_set1_ph() { + const unsafe fn test_mm256_set1_ph() { let r = _mm256_set1_ph(1.0); let e = _mm256_set_ph( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, @@ -16790,7 +17016,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_set1_ph() { + const unsafe fn test_mm512_set1_ph() { let r = _mm512_set1_ph(1.0); let e = _mm512_set_ph( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, @@ -16800,14 +17026,14 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_setr_ph() { + const unsafe fn test_mm_setr_ph() { let r = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 
5.0, 6.0, 7.0, 8.0); let e = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_setr_ph() { + const unsafe fn test_mm256_setr_ph() { let r = _mm256_setr_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); @@ -16818,7 +17044,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_setr_ph() { + const unsafe fn test_mm512_setr_ph() { let r = _mm512_setr_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -16833,28 +17059,28 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_setzero_ph() { + const unsafe fn test_mm_setzero_ph() { let r = _mm_setzero_ph(); let e = _mm_set1_ph(0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_setzero_ph() { + const unsafe fn test_mm256_setzero_ph() { let r = _mm256_setzero_ph(); let e = _mm256_set1_ph(0.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_setzero_ph() { + const unsafe fn test_mm512_setzero_ph() { let r = _mm512_setzero_ph(); let e = _mm512_set1_ph(0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_castsi128_ph() { + const unsafe fn test_mm_castsi128_ph() { let a = _mm_set1_epi16(0x3c00); let r = _mm_castsi128_ph(a); let e = _mm_set1_ph(1.0); @@ -16862,7 +17088,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_castsi256_ph() { + const unsafe fn test_mm256_castsi256_ph() { let a = _mm256_set1_epi16(0x3c00); let r = _mm256_castsi256_ph(a); let e = _mm256_set1_ph(1.0); @@ -16870,7 +17096,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castsi512_ph() { + const unsafe fn test_mm512_castsi512_ph() { let a = _mm512_set1_epi16(0x3c00); let r = _mm512_castsi512_ph(a); let e = _mm512_set1_ph(1.0); @@ -16878,7 +17104,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_si128() { + const unsafe fn test_mm_castph_si128() { let a = _mm_set1_ph(1.0); let r = _mm_castph_si128(a); let e = _mm_set1_epi16(0x3c00); @@ -16886,7 +17112,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_si256() { + const unsafe fn test_mm256_castph_si256() { let a = _mm256_set1_ph(1.0); let r = _mm256_castph_si256(a); let e = _mm256_set1_epi16(0x3c00); @@ -16894,7 +17120,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_si512() { + const unsafe fn test_mm512_castph_si512() { let a = _mm512_set1_ph(1.0); let r = _mm512_castph_si512(a); let e = _mm512_set1_epi16(0x3c00); @@ -16902,7 +17128,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_castps_ph() { + const unsafe fn test_mm_castps_ph() { let a = _mm_castsi128_ps(_mm_set1_epi16(0x3c00)); let r = _mm_castps_ph(a); let e = _mm_set1_ph(1.0); @@ -16910,7 +17136,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_castps_ph() { + const unsafe fn test_mm256_castps_ph() { let a = _mm256_castsi256_ps(_mm256_set1_epi16(0x3c00)); let r = _mm256_castps_ph(a); let e = _mm256_set1_ph(1.0); @@ -16918,7 +17144,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castps_ph() { + const unsafe fn test_mm512_castps_ph() { let a = 
_mm512_castsi512_ps(_mm512_set1_epi16(0x3c00)); let r = _mm512_castps_ph(a); let e = _mm512_set1_ph(1.0); @@ -16926,7 +17152,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_ps() { + const unsafe fn test_mm_castph_ps() { let a = _mm_castsi128_ph(_mm_set1_epi32(0x3f800000)); let r = _mm_castph_ps(a); let e = _mm_set1_ps(1.0); @@ -16934,7 +17160,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_ps() { + const unsafe fn test_mm256_castph_ps() { let a = _mm256_castsi256_ph(_mm256_set1_epi32(0x3f800000)); let r = _mm256_castph_ps(a); let e = _mm256_set1_ps(1.0); @@ -16942,7 +17168,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_ps() { + const unsafe fn test_mm512_castph_ps() { let a = _mm512_castsi512_ph(_mm512_set1_epi32(0x3f800000)); let r = _mm512_castph_ps(a); let e = _mm512_set1_ps(1.0); @@ -16950,7 +17176,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_castpd_ph() { + const unsafe fn test_mm_castpd_ph() { let a = _mm_castsi128_pd(_mm_set1_epi16(0x3c00)); let r = _mm_castpd_ph(a); let e = _mm_set1_ph(1.0); @@ -16958,7 +17184,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_castpd_ph() { + const unsafe fn test_mm256_castpd_ph() { let a = _mm256_castsi256_pd(_mm256_set1_epi16(0x3c00)); let r = _mm256_castpd_ph(a); let e = _mm256_set1_ph(1.0); @@ -16966,7 +17192,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castpd_ph() { + const unsafe fn test_mm512_castpd_ph() { let a = _mm512_castsi512_pd(_mm512_set1_epi16(0x3c00)); let r = _mm512_castpd_ph(a); let e = _mm512_set1_ph(1.0); @@ -16974,7 +17200,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_pd() { + const unsafe fn test_mm_castph_pd() { let a = _mm_castsi128_ph(_mm_set1_epi64x(0x3ff0000000000000)); let r = _mm_castph_pd(a); let e = _mm_set1_pd(1.0); @@ -16982,7 +17208,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_pd() { + const unsafe fn test_mm256_castph_pd() { let a = _mm256_castsi256_ph(_mm256_set1_epi64x(0x3ff0000000000000)); let r = _mm256_castph_pd(a); let e = _mm256_set1_pd(1.0); @@ -16990,7 +17216,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_pd() { + const unsafe fn test_mm512_castph_pd() { let a = _mm512_castsi512_ph(_mm512_set1_epi64(0x3ff0000000000000)); let r = _mm512_castph_pd(a); let e = _mm512_set1_pd(1.0); @@ -16998,7 +17224,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_castph256_ph128() { + const unsafe fn test_mm256_castph256_ph128() { let a = _mm256_setr_ph( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -17008,7 +17234,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm512_castph512_ph128() { + const unsafe fn test_mm512_castph512_ph128() { let a = _mm512_setr_ph( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., @@ -17019,7 +17245,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm512_castph512_ph256() { + const unsafe fn test_mm512_castph512_ph256() { let a = _mm512_setr_ph( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., @@ -17032,21 +17258,21 @@ mod tests { } 
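    // The 0x3c00 constant used by the cast tests in this module is the IEEE 754 binary16
    // encoding of 1.0 (just as 0x3f800000 encodes 1.0 in binary32 and 0x3ff0000000000000 in
    // binary64), which is why reinterpreting an all-0x3c00 integer vector as `__m128h`
    // compares equal to `_mm_set1_ph(1.0)`. A minimal sketch of that fact as a plain helper,
    // not part of the original patch and not wired into any `simd_test`; it assumes the
    // unstable `f16::from_bits` is available in this crate:
    #[allow(dead_code)]
    fn binary16_one_bits() {
        // sign 0, biased exponent 01111 (= 15, i.e. unbiased 0), mantissa 0 -> 1.0
        let one = f16::from_bits(0x3c00);
        assert!(one == 1.0);
    }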
#[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_castph128_ph256() { + const unsafe fn test_mm256_castph128_ph256() { let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_castph128_ph256(a); assert_eq_m128h(_mm256_castph256_ph128(r), a); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm512_castph128_ph512() { + const unsafe fn test_mm512_castph128_ph512() { let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm512_castph128_ph512(a); assert_eq_m128h(_mm512_castph512_ph128(r), a); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm512_castph256_ph512() { + const unsafe fn test_mm512_castph256_ph512() { let a = _mm256_setr_ph( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -17055,7 +17281,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_zextph128_ph256() { + const unsafe fn test_mm256_zextph128_ph256() { let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_zextph128_ph256(a); let e = _mm256_setr_ph( @@ -17065,7 +17291,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_zextph128_ph512() { + const unsafe fn test_mm512_zextph128_ph512() { let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm512_zextph128_ph512(a); let e = _mm512_setr_ph( @@ -17076,7 +17302,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_zextph256_ph512() { + const unsafe fn test_mm512_zextph256_ph512() { let a = _mm256_setr_ph( 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., ); @@ -17343,14 +17569,14 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_load_ph() { + const unsafe fn test_mm_load_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_load_ph(addr_of!(a).cast()); assert_eq_m128h(a, b); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_load_ph() { + const unsafe fn test_mm256_load_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); @@ -17359,7 +17585,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_load_ph() { + const unsafe fn test_mm512_load_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -17370,7 +17596,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_load_sh() { + const unsafe fn test_mm_load_sh() { let a = _mm_set_sh(1.0); let b = _mm_load_sh(addr_of!(a).cast()); assert_eq_m128h(a, b); @@ -17396,7 +17622,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_loadu_ph() { + const unsafe fn test_mm_loadu_ph() { let array = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; let r = _mm_loadu_ph(array.as_ptr()); let e = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); @@ -17404,7 +17630,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_loadu_ph() { + const unsafe fn test_mm256_loadu_ph() { let array = [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ]; @@ -17416,7 +17642,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_loadu_ph() { + const unsafe fn test_mm512_loadu_ph() { let array = [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 
19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -17432,7 +17658,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_move_sh() { + const unsafe fn test_mm_move_sh() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_sh(9.0); let r = _mm_move_sh(a, b); @@ -17441,7 +17667,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_move_sh() { + const unsafe fn test_mm_mask_move_sh() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_sh(9.0); let src = _mm_set_sh(10.0); @@ -17451,7 +17677,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_move_sh() { + const unsafe fn test_mm_maskz_move_sh() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_sh(9.0); let r = _mm_maskz_move_sh(0, a, b); @@ -17460,7 +17686,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_store_ph() { + const unsafe fn test_mm_store_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let mut b = _mm_setzero_ph(); _mm_store_ph(addr_of_mut!(b).cast(), a); @@ -17468,7 +17694,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_store_ph() { + const unsafe fn test_mm256_store_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); @@ -17478,7 +17704,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_store_ph() { + const unsafe fn test_mm512_store_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -17490,7 +17716,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_store_sh() { + const unsafe fn test_mm_store_sh() { let a = _mm_set_sh(1.0); let mut b = _mm_setzero_ph(); _mm_store_sh(addr_of_mut!(b).cast(), a); @@ -17508,7 +17734,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_storeu_ph() { + const unsafe fn test_mm_storeu_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let mut array = [0.0; 8]; _mm_storeu_ph(array.as_mut_ptr(), a); @@ -17516,7 +17742,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_storeu_ph() { + const unsafe fn test_mm256_storeu_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); @@ -17526,7 +17752,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_storeu_ph() { + const unsafe fn test_mm512_storeu_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -17538,7 +17764,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_add_ph() { + const unsafe fn test_mm_add_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); let r = _mm_add_ph(a, b); @@ -17547,7 +17773,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_add_ph() { + const unsafe fn test_mm_mask_add_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); let src = 
_mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); @@ -17557,7 +17783,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_add_ph() { + const unsafe fn test_mm_maskz_add_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); let r = _mm_maskz_add_ph(0b01010101, a, b); @@ -17566,7 +17792,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_add_ph() { + const unsafe fn test_mm256_add_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); @@ -17579,7 +17805,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_add_ph() { + const unsafe fn test_mm256_mask_add_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); @@ -17597,7 +17823,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_add_ph() { + const unsafe fn test_mm256_maskz_add_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); @@ -17612,7 +17838,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_add_ph() { + const unsafe fn test_mm512_add_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -17629,7 +17855,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_add_ph() { + const unsafe fn test_mm512_mask_add_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -17653,7 +17879,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_add_ph() { + const unsafe fn test_mm512_maskz_add_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -17783,7 +18009,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_add_sh() { + const unsafe fn test_mm_add_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); let r = _mm_add_sh(a, b); @@ -17792,7 +18018,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_add_sh() { + const unsafe fn test_mm_mask_add_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); let src = _mm_set_sh(4.0); @@ -17805,7 +18031,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_add_sh() { + const unsafe fn test_mm_maskz_add_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); let r = _mm_maskz_add_sh(0, a, b); @@ -17817,7 +18043,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_sub_ph() { + const unsafe fn test_mm_sub_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); let r = _mm_sub_ph(a, b); @@ -17826,7 +18052,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_sub_ph() { + const unsafe fn test_mm_mask_sub_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); let 
src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); @@ -17836,7 +18062,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_sub_ph() { + const unsafe fn test_mm_maskz_sub_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); let r = _mm_maskz_sub_ph(0b01010101, a, b); @@ -17845,7 +18071,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_sub_ph() { + const unsafe fn test_mm256_sub_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); @@ -17861,7 +18087,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_sub_ph() { + const unsafe fn test_mm256_mask_sub_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); @@ -17879,7 +18105,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_sub_ph() { + const unsafe fn test_mm256_maskz_sub_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); @@ -17894,7 +18120,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_sub_ph() { + const unsafe fn test_mm512_sub_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -17915,7 +18141,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_sub_ph() { + const unsafe fn test_mm512_mask_sub_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -17939,7 +18165,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_sub_ph() { + const unsafe fn test_mm512_maskz_sub_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -18073,7 +18299,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_sub_sh() { + const unsafe fn test_mm_sub_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); let r = _mm_sub_sh(a, b); @@ -18082,7 +18308,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_sub_sh() { + const unsafe fn test_mm_mask_sub_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); let src = _mm_set_sh(4.0); @@ -18095,7 +18321,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_sub_sh() { + const unsafe fn test_mm_maskz_sub_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); let r = _mm_maskz_sub_sh(0, a, b); @@ -18107,7 +18333,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mul_ph() { + const unsafe fn test_mm_mul_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); let r = _mm_mul_ph(a, b); @@ -18116,7 +18342,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_mul_ph() { + const unsafe fn test_mm_mask_mul_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 
1.0); let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); @@ -18126,7 +18352,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_mul_ph() { + const unsafe fn test_mm_maskz_mul_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); let r = _mm_maskz_mul_ph(0b01010101, a, b); @@ -18135,7 +18361,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mul_ph() { + const unsafe fn test_mm256_mul_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); @@ -18151,7 +18377,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_mul_ph() { + const unsafe fn test_mm256_mask_mul_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); @@ -18169,7 +18395,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_mul_ph() { + const unsafe fn test_mm256_maskz_mul_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); @@ -18184,7 +18410,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mul_ph() { + const unsafe fn test_mm512_mul_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -18205,7 +18431,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_mul_ph() { + const unsafe fn test_mm512_mask_mul_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -18229,7 +18455,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_mul_ph() { + const unsafe fn test_mm512_maskz_mul_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -18363,7 +18589,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mul_sh() { + const unsafe fn test_mm_mul_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); let r = _mm_mul_sh(a, b); @@ -18372,7 +18598,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_mul_sh() { + const unsafe fn test_mm_mask_mul_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); let src = _mm_set_sh(4.0); @@ -18385,7 +18611,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_mul_sh() { + const unsafe fn test_mm_maskz_mul_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); let r = _mm_maskz_mul_sh(0, a, b); @@ -18397,7 +18623,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_div_ph() { + const unsafe fn test_mm_div_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let r = _mm_div_ph(a, b); @@ -18406,7 +18632,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_div_ph() { + const unsafe fn test_mm_mask_div_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let src = _mm_set_ph(4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0); @@ -18416,7 +18642,7 @@ mod tests { } #[simd_test(enable = 
"avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_div_ph() { + const unsafe fn test_mm_maskz_div_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let r = _mm_maskz_div_ph(0b01010101, a, b); @@ -18425,7 +18651,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_div_ph() { + const unsafe fn test_mm256_div_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let r = _mm256_div_ph(a, b); @@ -18434,7 +18660,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_div_ph() { + const unsafe fn test_mm256_mask_div_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let src = _mm256_set_ph( @@ -18449,7 +18675,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_div_ph() { + const unsafe fn test_mm256_maskz_div_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let r = _mm256_maskz_div_ph(0b0101010101010101, a, b); @@ -18460,7 +18686,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_div_ph() { + const unsafe fn test_mm512_div_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let r = _mm512_div_ph(a, b); @@ -18469,7 +18695,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_div_ph() { + const unsafe fn test_mm512_mask_div_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let src = _mm512_set_ph( @@ -18486,7 +18712,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_div_ph() { + const unsafe fn test_mm512_maskz_div_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let r = _mm512_maskz_div_ph(0b01010101010101010101010101010101, a, b); @@ -18585,7 +18811,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_div_sh() { + const unsafe fn test_mm_div_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); let r = _mm_div_sh(a, b); @@ -18594,7 +18820,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_div_sh() { + const unsafe fn test_mm_mask_div_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); let src = _mm_set_sh(4.0); @@ -18607,7 +18833,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_div_sh() { + const unsafe fn test_mm_maskz_div_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); let r = _mm_maskz_div_sh(0, a, b); @@ -19451,7 +19677,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_abs_ph() { + const unsafe fn test_mm_abs_ph() { let a = _mm_set_ph(-1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0); let r = _mm_abs_ph(a); let e = _mm_set_ph(1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0); @@ -19459,7 +19685,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_abs_ph() { + const unsafe fn test_mm256_abs_ph() { let a = _mm256_set_ph( -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, -14.0, @@ -19472,7 +19698,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_abs_ph() { + const unsafe fn test_mm512_abs_ph() { let a = _mm512_set_ph( -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, -14.0, 15.0, -16.0, 17.0, -18.0, 19.0, -20.0, 21.0, -22.0, 23.0, -24.0, 25.0, -26.0, @@ -19488,7 +19714,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_conj_pch() { + const unsafe fn test_mm_conj_pch() { let a = _mm_set1_pch(0.0, 1.0); 
let r = _mm_conj_pch(a); let e = _mm_set1_pch(0.0, -1.0); @@ -19496,7 +19722,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_conj_pch() { + const unsafe fn test_mm_mask_conj_pch() { let a = _mm_set1_pch(0.0, 1.0); let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); let r = _mm_mask_conj_pch(src, 0b0101, a); @@ -19505,7 +19731,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_conj_pch() { + const unsafe fn test_mm_maskz_conj_pch() { let a = _mm_set1_pch(0.0, 1.0); let r = _mm_maskz_conj_pch(0b0101, a); let e = _mm_setr_ph(0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0); @@ -19513,7 +19739,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_conj_pch() { + const unsafe fn test_mm256_conj_pch() { let a = _mm256_set1_pch(0.0, 1.0); let r = _mm256_conj_pch(a); let e = _mm256_set1_pch(0.0, -1.0); @@ -19521,7 +19747,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_conj_pch() { + const unsafe fn test_mm256_mask_conj_pch() { let a = _mm256_set1_pch(0.0, 1.0); let src = _mm256_setr_ph( 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, @@ -19534,7 +19760,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_conj_pch() { + const unsafe fn test_mm256_maskz_conj_pch() { let a = _mm256_set1_pch(0.0, 1.0); let r = _mm256_maskz_conj_pch(0b01010101, a); let e = _mm256_setr_ph( @@ -19544,7 +19770,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_conj_pch() { + const unsafe fn test_mm512_conj_pch() { let a = _mm512_set1_pch(0.0, 1.0); let r = _mm512_conj_pch(a); let e = _mm512_set1_pch(0.0, -1.0); @@ -19552,7 +19778,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_conj_pch() { + const unsafe fn test_mm512_mask_conj_pch() { let a = _mm512_set1_pch(0.0, 1.0); let src = _mm512_setr_ph( 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, @@ -19569,7 +19795,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_conj_pch() { + const unsafe fn test_mm512_maskz_conj_pch() { let a = _mm512_set1_pch(0.0, 1.0); let r = _mm512_maskz_conj_pch(0b0101010101010101, a); let e = _mm512_setr_ph( @@ -20200,7 +20426,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmadd_ph() { + const unsafe fn test_mm_fmadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -20210,7 +20436,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmadd_ph() { + const unsafe fn test_mm_mask_fmadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -20220,7 +20446,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmadd_ph() { + const unsafe fn test_mm_mask3_fmadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -20230,7 +20456,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmadd_ph() { + const unsafe fn test_mm_maskz_fmadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -20240,7 +20466,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmadd_ph() { + const unsafe fn test_mm256_fmadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = 
_mm256_set1_ph(3.0); @@ -20250,7 +20476,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmadd_ph() { + const unsafe fn test_mm256_mask_fmadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -20262,7 +20488,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmadd_ph() { + const unsafe fn test_mm256_mask3_fmadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -20274,7 +20500,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmadd_ph() { + const unsafe fn test_mm256_maskz_fmadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -20286,7 +20512,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmadd_ph() { + const unsafe fn test_mm512_fmadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -20296,7 +20522,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmadd_ph() { + const unsafe fn test_mm512_mask_fmadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -20309,7 +20535,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmadd_ph() { + const unsafe fn test_mm512_mask3_fmadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -20322,7 +20548,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmadd_ph() { + const unsafe fn test_mm512_maskz_fmadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -20399,7 +20625,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmadd_sh() { + const unsafe fn test_mm_fmadd_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); @@ -20409,7 +20635,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmadd_sh() { + const unsafe fn test_mm_mask_fmadd_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); @@ -20422,7 +20648,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmadd_sh() { + const unsafe fn test_mm_mask3_fmadd_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); @@ -20435,7 +20661,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmadd_sh() { + const unsafe fn test_mm_maskz_fmadd_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); @@ -20509,7 +20735,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmsub_ph() { + const unsafe fn test_mm_fmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -20519,7 +20745,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmsub_ph() { + const unsafe fn 
test_mm_mask_fmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -20529,7 +20755,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmsub_ph() { + const unsafe fn test_mm_mask3_fmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -20539,7 +20765,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmsub_ph() { + const unsafe fn test_mm_maskz_fmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -20549,7 +20775,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmsub_ph() { + const unsafe fn test_mm256_fmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -20559,7 +20785,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmsub_ph() { + const unsafe fn test_mm256_mask_fmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -20571,7 +20797,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmsub_ph() { + const unsafe fn test_mm256_mask3_fmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -20583,7 +20809,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmsub_ph() { + const unsafe fn test_mm256_maskz_fmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -20595,7 +20821,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmsub_ph() { + const unsafe fn test_mm512_fmsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -20605,7 +20831,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmsub_ph() { + const unsafe fn test_mm512_mask_fmsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -20618,7 +20844,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmsub_ph() { + const unsafe fn test_mm512_mask3_fmsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -20631,7 +20857,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmsub_ph() { + const unsafe fn test_mm512_maskz_fmsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -20708,7 +20934,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmsub_sh() { + const unsafe fn test_mm_fmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); @@ -20718,7 +20944,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmsub_sh() { + const unsafe fn test_mm_mask_fmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); @@ -20731,7 +20957,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmsub_sh() { + const unsafe fn test_mm_mask3_fmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 
21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); @@ -20744,7 +20970,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmsub_sh() { + const unsafe fn test_mm_maskz_fmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); @@ -20818,7 +21044,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fnmadd_ph() { + const unsafe fn test_mm_fnmadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -20828,7 +21054,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fnmadd_ph() { + const unsafe fn test_mm_mask_fnmadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -20838,7 +21064,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fnmadd_ph() { + const unsafe fn test_mm_mask3_fnmadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -20848,7 +21074,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fnmadd_ph() { + const unsafe fn test_mm_maskz_fnmadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -20858,7 +21084,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fnmadd_ph() { + const unsafe fn test_mm256_fnmadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -20868,7 +21094,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fnmadd_ph() { + const unsafe fn test_mm256_mask_fnmadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -20880,7 +21106,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fnmadd_ph() { + const unsafe fn test_mm256_mask3_fnmadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -20892,7 +21118,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fnmadd_ph() { + const unsafe fn test_mm256_maskz_fnmadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -20904,7 +21130,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fnmadd_ph() { + const unsafe fn test_mm512_fnmadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -20914,7 +21140,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fnmadd_ph() { + const unsafe fn test_mm512_mask_fnmadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -20927,7 +21153,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fnmadd_ph() { + const unsafe fn test_mm512_mask3_fnmadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -20940,7 +21166,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fnmadd_ph() { + const unsafe fn test_mm512_maskz_fnmadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -21018,7 +21244,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn 
test_mm_fnmadd_sh() { + const unsafe fn test_mm_fnmadd_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); @@ -21028,7 +21254,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fnmadd_sh() { + const unsafe fn test_mm_mask_fnmadd_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); @@ -21041,7 +21267,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fnmadd_sh() { + const unsafe fn test_mm_mask3_fnmadd_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); @@ -21054,7 +21280,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fnmadd_sh() { + const unsafe fn test_mm_maskz_fnmadd_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); @@ -21128,7 +21354,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fnmsub_ph() { + const unsafe fn test_mm_fnmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -21138,7 +21364,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fnmsub_ph() { + const unsafe fn test_mm_mask_fnmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -21148,7 +21374,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fnmsub_ph() { + const unsafe fn test_mm_mask3_fnmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -21158,7 +21384,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fnmsub_ph() { + const unsafe fn test_mm_maskz_fnmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -21168,7 +21394,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fnmsub_ph() { + const unsafe fn test_mm256_fnmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -21178,7 +21404,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fnmsub_ph() { + const unsafe fn test_mm256_mask_fnmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -21190,7 +21416,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fnmsub_ph() { + const unsafe fn test_mm256_mask3_fnmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -21202,7 +21428,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fnmsub_ph() { + const unsafe fn test_mm256_maskz_fnmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -21214,7 +21440,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fnmsub_ph() { + const unsafe fn test_mm512_fnmsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ 
-21224,7 +21450,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fnmsub_ph() { + const unsafe fn test_mm512_mask_fnmsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -21237,7 +21463,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fnmsub_ph() { + const unsafe fn test_mm512_mask3_fnmsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -21250,7 +21476,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fnmsub_ph() { + const unsafe fn test_mm512_maskz_fnmsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -21328,7 +21554,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fnmsub_sh() { + const unsafe fn test_mm_fnmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); @@ -21338,7 +21564,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fnmsub_sh() { + const unsafe fn test_mm_mask_fnmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); @@ -21351,7 +21577,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fnmsub_sh() { + const unsafe fn test_mm_mask3_fnmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); @@ -21364,7 +21590,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fnmsub_sh() { + const unsafe fn test_mm_maskz_fnmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); @@ -21438,7 +21664,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmaddsub_ph() { + const unsafe fn test_mm_fmaddsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -21448,7 +21674,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmaddsub_ph() { + const unsafe fn test_mm_mask_fmaddsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -21458,7 +21684,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmaddsub_ph() { + const unsafe fn test_mm_mask3_fmaddsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -21468,7 +21694,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmaddsub_ph() { + const unsafe fn test_mm_maskz_fmaddsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -21478,7 +21704,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmaddsub_ph() { + const unsafe fn test_mm256_fmaddsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -21490,7 +21716,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmaddsub_ph() { + const unsafe fn 
test_mm256_mask_fmaddsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -21502,7 +21728,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmaddsub_ph() { + const unsafe fn test_mm256_mask3_fmaddsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -21514,7 +21740,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmaddsub_ph() { + const unsafe fn test_mm256_maskz_fmaddsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -21526,7 +21752,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmaddsub_ph() { + const unsafe fn test_mm512_fmaddsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -21539,7 +21765,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmaddsub_ph() { + const unsafe fn test_mm512_mask_fmaddsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -21552,7 +21778,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmaddsub_ph() { + const unsafe fn test_mm512_mask3_fmaddsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -21565,7 +21791,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmaddsub_ph() { + const unsafe fn test_mm512_maskz_fmaddsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -21646,7 +21872,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmsubadd_ph() { + const unsafe fn test_mm_fmsubadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -21656,7 +21882,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmsubadd_ph() { + const unsafe fn test_mm_mask_fmsubadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -21666,7 +21892,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmsubadd_ph() { + const unsafe fn test_mm_mask3_fmsubadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -21676,7 +21902,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmsubadd_ph() { + const unsafe fn test_mm_maskz_fmsubadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); @@ -21686,7 +21912,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmsubadd_ph() { + const unsafe fn test_mm256_fmsubadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -21698,7 +21924,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmsubadd_ph() { + const unsafe fn test_mm256_mask_fmsubadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -21710,7 +21936,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmsubadd_ph() { + const unsafe fn test_mm256_mask3_fmsubadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -21722,7 +21948,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn 
test_mm256_maskz_fmsubadd_ph() { + const unsafe fn test_mm256_maskz_fmsubadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); @@ -21734,7 +21960,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmsubadd_ph() { + const unsafe fn test_mm512_fmsubadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -21747,7 +21973,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmsubadd_ph() { + const unsafe fn test_mm512_mask_fmsubadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -21760,7 +21986,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmsubadd_ph() { + const unsafe fn test_mm512_mask3_fmsubadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -21773,7 +21999,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmsubadd_ph() { + const unsafe fn test_mm512_maskz_fmsubadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); @@ -23738,42 +23964,42 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_reduce_add_ph() { + const unsafe fn test_mm_reduce_add_ph() { let a = _mm_set1_ph(2.0); let r = _mm_reduce_add_ph(a); assert_eq!(r, 16.0); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_reduce_add_ph() { + const unsafe fn test_mm256_reduce_add_ph() { let a = _mm256_set1_ph(2.0); let r = _mm256_reduce_add_ph(a); assert_eq!(r, 32.0); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_reduce_add_ph() { + const unsafe fn test_mm512_reduce_add_ph() { let a = _mm512_set1_ph(2.0); let r = _mm512_reduce_add_ph(a); assert_eq!(r, 64.0); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_reduce_mul_ph() { + const unsafe fn test_mm_reduce_mul_ph() { let a = _mm_set1_ph(2.0); let r = _mm_reduce_mul_ph(a); assert_eq!(r, 256.0); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_reduce_mul_ph() { + const unsafe fn test_mm256_reduce_mul_ph() { let a = _mm256_set1_ph(2.0); let r = _mm256_reduce_mul_ph(a); assert_eq!(r, 65536.0); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_reduce_mul_ph() { + const unsafe fn test_mm512_reduce_mul_ph() { let a = _mm512_set1_ph(2.0); let r = _mm512_reduce_mul_ph(a); assert_eq!(r, 16777216.0); @@ -24010,7 +24236,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_blend_ph() { + const unsafe fn test_mm_mask_blend_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_ph(-1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, -8.0); let r = _mm_mask_blend_ph(0b01010101, a, b); @@ -24019,7 +24245,7 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_blend_ph() { + const unsafe fn test_mm256_mask_blend_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); @@ -24036,7 +24262,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_blend_ph() { + const unsafe fn test_mm512_mask_blend_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -27307,14 +27533,14 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - 
unsafe fn test_mm_cvtsh_h() { + const unsafe fn test_mm_cvtsh_h() { let a = _mm_setr_ph(1.0, 2.0, 3.0, 42.0, 5.0, 6.0, 7.0, 8.0); let r = _mm_cvtsh_h(a); assert_eq!(r, 1.0); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_cvtsh_h() { + const unsafe fn test_mm256_cvtsh_h() { let a = _mm256_setr_ph( 1.0, 2.0, 3.0, 42.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); @@ -27323,7 +27549,7 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cvtsh_h() { + const unsafe fn test_mm512_cvtsh_h() { let a = _mm512_setr_ph( 1.0, 2.0, 3.0, 42.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -27334,14 +27560,14 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cvtsi128_si16() { + const unsafe fn test_mm_cvtsi128_si16() { let a = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm_cvtsi128_si16(a); assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cvtsi16_si128() { + const unsafe fn test_mm_cvtsi16_si128() { let a = 1; let r = _mm_cvtsi16_si128(a); let e = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0); diff --git a/crates/core_arch/src/x86/avx512vbmi2.rs b/crates/core_arch/src/x86/avx512vbmi2.rs index e25fd4528d..da268f969c 100644 --- a/crates/core_arch/src/x86/avx512vbmi2.rs +++ b/crates/core_arch/src/x86/avx512vbmi2.rs @@ -499,7 +499,8 @@ pub fn _mm_maskz_expand_epi8(k: __mmask16, a: __m128i) -> __m128i { #[target_feature(enable = "avx512vbmi2")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvq))] -pub fn _mm512_shldv_epi64(a: __m512i, b: __m512i, c: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shldv_epi64(a: __m512i, b: __m512i, c: __m512i) -> __m512i { unsafe { transmute(simd_funnel_shl( a.as_i64x8(), @@ -516,7 +517,8 @@ pub fn _mm512_shldv_epi64(a: __m512i, b: __m512i, c: __m512i) -> __m512i { #[target_feature(enable = "avx512vbmi2")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvq))] -pub fn _mm512_mask_shldv_epi64(a: __m512i, k: __mmask8, b: __m512i, c: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shldv_epi64(a: __m512i, k: __mmask8, b: __m512i, c: __m512i) -> __m512i { unsafe { let shf = _mm512_shldv_epi64(a, b, c).as_i64x8(); transmute(simd_select_bitmask(k, shf, a.as_i64x8())) @@ -530,7 +532,8 @@ pub fn _mm512_mask_shldv_epi64(a: __m512i, k: __mmask8, b: __m512i, c: __m512i) #[target_feature(enable = "avx512vbmi2")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvq))] -pub fn _mm512_maskz_shldv_epi64(k: __mmask8, a: __m512i, b: __m512i, c: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shldv_epi64(k: __mmask8, a: __m512i, b: __m512i, c: __m512i) -> __m512i { unsafe { let shf = _mm512_shldv_epi64(a, b, c).as_i64x8(); transmute(simd_select_bitmask(k, shf, i64x8::ZERO)) @@ -544,7 +547,8 @@ pub fn _mm512_maskz_shldv_epi64(k: __mmask8, a: __m512i, b: __m512i, c: __m512i) #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvq))] -pub fn _mm256_shldv_epi64(a: __m256i, b: __m256i, c: __m256i) -> __m256i { +#[rustc_const_unstable(feature = 
"stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shldv_epi64(a: __m256i, b: __m256i, c: __m256i) -> __m256i { unsafe { transmute(simd_funnel_shl( a.as_i64x4(), @@ -561,7 +565,8 @@ pub fn _mm256_shldv_epi64(a: __m256i, b: __m256i, c: __m256i) -> __m256i { #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvq))] -pub fn _mm256_mask_shldv_epi64(a: __m256i, k: __mmask8, b: __m256i, c: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shldv_epi64(a: __m256i, k: __mmask8, b: __m256i, c: __m256i) -> __m256i { unsafe { let shf = _mm256_shldv_epi64(a, b, c).as_i64x4(); transmute(simd_select_bitmask(k, shf, a.as_i64x4())) @@ -575,7 +580,8 @@ pub fn _mm256_mask_shldv_epi64(a: __m256i, k: __mmask8, b: __m256i, c: __m256i) #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvq))] -pub fn _mm256_maskz_shldv_epi64(k: __mmask8, a: __m256i, b: __m256i, c: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shldv_epi64(k: __mmask8, a: __m256i, b: __m256i, c: __m256i) -> __m256i { unsafe { let shf = _mm256_shldv_epi64(a, b, c).as_i64x4(); transmute(simd_select_bitmask(k, shf, i64x4::ZERO)) @@ -589,7 +595,8 @@ pub fn _mm256_maskz_shldv_epi64(k: __mmask8, a: __m256i, b: __m256i, c: __m256i) #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvq))] -pub fn _mm_shldv_epi64(a: __m128i, b: __m128i, c: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_shldv_epi64(a: __m128i, b: __m128i, c: __m128i) -> __m128i { unsafe { transmute(simd_funnel_shl( a.as_i64x2(), @@ -606,7 +613,8 @@ pub fn _mm_shldv_epi64(a: __m128i, b: __m128i, c: __m128i) -> __m128i { #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvq))] -pub fn _mm_mask_shldv_epi64(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_shldv_epi64(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> __m128i { unsafe { let shf = _mm_shldv_epi64(a, b, c).as_i64x2(); transmute(simd_select_bitmask(k, shf, a.as_i64x2())) @@ -620,7 +628,8 @@ pub fn _mm_mask_shldv_epi64(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvq))] -pub fn _mm_maskz_shldv_epi64(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_shldv_epi64(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> __m128i { unsafe { let shf = _mm_shldv_epi64(a, b, c).as_i64x2(); transmute(simd_select_bitmask(k, shf, i64x2::ZERO)) @@ -634,7 +643,8 @@ pub fn _mm_maskz_shldv_epi64(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> #[target_feature(enable = "avx512vbmi2")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvd))] -pub fn _mm512_shldv_epi32(a: __m512i, b: __m512i, c: __m512i) -> __m512i { +#[rustc_const_unstable(feature = 
"stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shldv_epi32(a: __m512i, b: __m512i, c: __m512i) -> __m512i { unsafe { transmute(simd_funnel_shl( a.as_i32x16(), @@ -651,7 +661,8 @@ pub fn _mm512_shldv_epi32(a: __m512i, b: __m512i, c: __m512i) -> __m512i { #[target_feature(enable = "avx512vbmi2")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvd))] -pub fn _mm512_mask_shldv_epi32(a: __m512i, k: __mmask16, b: __m512i, c: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shldv_epi32(a: __m512i, k: __mmask16, b: __m512i, c: __m512i) -> __m512i { unsafe { let shf = _mm512_shldv_epi32(a, b, c).as_i32x16(); transmute(simd_select_bitmask(k, shf, a.as_i32x16())) @@ -665,7 +676,8 @@ pub fn _mm512_mask_shldv_epi32(a: __m512i, k: __mmask16, b: __m512i, c: __m512i) #[target_feature(enable = "avx512vbmi2")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvd))] -pub fn _mm512_maskz_shldv_epi32(k: __mmask16, a: __m512i, b: __m512i, c: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shldv_epi32(k: __mmask16, a: __m512i, b: __m512i, c: __m512i) -> __m512i { unsafe { let shf = _mm512_shldv_epi32(a, b, c).as_i32x16(); transmute(simd_select_bitmask(k, shf, i32x16::ZERO)) @@ -679,7 +691,8 @@ pub fn _mm512_maskz_shldv_epi32(k: __mmask16, a: __m512i, b: __m512i, c: __m512i #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvd))] -pub fn _mm256_shldv_epi32(a: __m256i, b: __m256i, c: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shldv_epi32(a: __m256i, b: __m256i, c: __m256i) -> __m256i { unsafe { transmute(simd_funnel_shl( a.as_i32x8(), @@ -696,7 +709,8 @@ pub fn _mm256_shldv_epi32(a: __m256i, b: __m256i, c: __m256i) -> __m256i { #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvd))] -pub fn _mm256_mask_shldv_epi32(a: __m256i, k: __mmask8, b: __m256i, c: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shldv_epi32(a: __m256i, k: __mmask8, b: __m256i, c: __m256i) -> __m256i { unsafe { let shf = _mm256_shldv_epi32(a, b, c).as_i32x8(); transmute(simd_select_bitmask(k, shf, a.as_i32x8())) @@ -710,7 +724,8 @@ pub fn _mm256_mask_shldv_epi32(a: __m256i, k: __mmask8, b: __m256i, c: __m256i) #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvd))] -pub fn _mm256_maskz_shldv_epi32(k: __mmask8, a: __m256i, b: __m256i, c: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shldv_epi32(k: __mmask8, a: __m256i, b: __m256i, c: __m256i) -> __m256i { unsafe { let shf = _mm256_shldv_epi32(a, b, c).as_i32x8(); transmute(simd_select_bitmask(k, shf, i32x8::ZERO)) @@ -724,7 +739,8 @@ pub fn _mm256_maskz_shldv_epi32(k: __mmask8, a: __m256i, b: __m256i, c: __m256i) #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvd))] -pub fn _mm_shldv_epi32(a: __m128i, b: __m128i, c: __m128i) -> __m128i { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_shldv_epi32(a: __m128i, b: __m128i, c: __m128i) -> __m128i { unsafe { transmute(simd_funnel_shl( a.as_i32x4(), @@ -741,7 +757,8 @@ pub fn _mm_shldv_epi32(a: __m128i, b: __m128i, c: __m128i) -> __m128i { #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvd))] -pub fn _mm_mask_shldv_epi32(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_shldv_epi32(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> __m128i { unsafe { let shf = _mm_shldv_epi32(a, b, c).as_i32x4(); transmute(simd_select_bitmask(k, shf, a.as_i32x4())) @@ -755,7 +772,8 @@ pub fn _mm_mask_shldv_epi32(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvd))] -pub fn _mm_maskz_shldv_epi32(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_shldv_epi32(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> __m128i { unsafe { let shf = _mm_shldv_epi32(a, b, c).as_i32x4(); transmute(simd_select_bitmask(k, shf, i32x4::ZERO)) @@ -769,7 +787,8 @@ pub fn _mm_maskz_shldv_epi32(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> #[target_feature(enable = "avx512vbmi2")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvw))] -pub fn _mm512_shldv_epi16(a: __m512i, b: __m512i, c: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shldv_epi16(a: __m512i, b: __m512i, c: __m512i) -> __m512i { unsafe { transmute(simd_funnel_shl( a.as_i16x32(), @@ -786,7 +805,8 @@ pub fn _mm512_shldv_epi16(a: __m512i, b: __m512i, c: __m512i) -> __m512i { #[target_feature(enable = "avx512vbmi2")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvw))] -pub fn _mm512_mask_shldv_epi16(a: __m512i, k: __mmask32, b: __m512i, c: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shldv_epi16(a: __m512i, k: __mmask32, b: __m512i, c: __m512i) -> __m512i { unsafe { let shf = _mm512_shldv_epi16(a, b, c).as_i16x32(); transmute(simd_select_bitmask(k, shf, a.as_i16x32())) @@ -800,7 +820,8 @@ pub fn _mm512_mask_shldv_epi16(a: __m512i, k: __mmask32, b: __m512i, c: __m512i) #[target_feature(enable = "avx512vbmi2")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvw))] -pub fn _mm512_maskz_shldv_epi16(k: __mmask32, a: __m512i, b: __m512i, c: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shldv_epi16(k: __mmask32, a: __m512i, b: __m512i, c: __m512i) -> __m512i { unsafe { let shf = _mm512_shldv_epi16(a, b, c).as_i16x32(); transmute(simd_select_bitmask(k, shf, i16x32::ZERO)) @@ -814,7 +835,8 @@ pub fn _mm512_maskz_shldv_epi16(k: __mmask32, a: __m512i, b: __m512i, c: __m512i #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvw))] -pub fn _mm256_shldv_epi16(a: __m256i, b: __m256i, c: __m256i) -> __m256i { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shldv_epi16(a: __m256i, b: __m256i, c: __m256i) -> __m256i { unsafe { transmute(simd_funnel_shl( a.as_i16x16(), @@ -831,7 +853,8 @@ pub fn _mm256_shldv_epi16(a: __m256i, b: __m256i, c: __m256i) -> __m256i { #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvw))] -pub fn _mm256_mask_shldv_epi16(a: __m256i, k: __mmask16, b: __m256i, c: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shldv_epi16(a: __m256i, k: __mmask16, b: __m256i, c: __m256i) -> __m256i { unsafe { let shf = _mm256_shldv_epi16(a, b, c).as_i16x16(); transmute(simd_select_bitmask(k, shf, a.as_i16x16())) @@ -845,7 +868,8 @@ pub fn _mm256_mask_shldv_epi16(a: __m256i, k: __mmask16, b: __m256i, c: __m256i) #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvw))] -pub fn _mm256_maskz_shldv_epi16(k: __mmask16, a: __m256i, b: __m256i, c: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shldv_epi16(k: __mmask16, a: __m256i, b: __m256i, c: __m256i) -> __m256i { unsafe { let shf = _mm256_shldv_epi16(a, b, c).as_i16x16(); transmute(simd_select_bitmask(k, shf, i16x16::ZERO)) @@ -859,7 +883,8 @@ pub fn _mm256_maskz_shldv_epi16(k: __mmask16, a: __m256i, b: __m256i, c: __m256i #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvw))] -pub fn _mm_shldv_epi16(a: __m128i, b: __m128i, c: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_shldv_epi16(a: __m128i, b: __m128i, c: __m128i) -> __m128i { unsafe { transmute(simd_funnel_shl( a.as_i16x8(), @@ -876,7 +901,8 @@ pub fn _mm_shldv_epi16(a: __m128i, b: __m128i, c: __m128i) -> __m128i { #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvw))] -pub fn _mm_mask_shldv_epi16(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_shldv_epi16(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> __m128i { unsafe { let shf = _mm_shldv_epi16(a, b, c).as_i16x8(); transmute(simd_select_bitmask(k, shf, a.as_i16x8())) @@ -890,7 +916,8 @@ pub fn _mm_mask_shldv_epi16(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldvw))] -pub fn _mm_maskz_shldv_epi16(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_shldv_epi16(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> __m128i { unsafe { let shf = _mm_shldv_epi16(a, b, c).as_i16x8(); transmute(simd_select_bitmask(k, shf, i16x8::ZERO)) @@ -904,7 +931,8 @@ pub fn _mm_maskz_shldv_epi16(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> #[target_feature(enable = "avx512vbmi2")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvq))] -pub fn _mm512_shrdv_epi64(a: __m512i, b: __m512i, c: __m512i) -> 
__m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shrdv_epi64(a: __m512i, b: __m512i, c: __m512i) -> __m512i { unsafe { transmute(simd_funnel_shr( b.as_i64x8(), @@ -921,7 +949,8 @@ pub fn _mm512_shrdv_epi64(a: __m512i, b: __m512i, c: __m512i) -> __m512i { #[target_feature(enable = "avx512vbmi2")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvq))] -pub fn _mm512_mask_shrdv_epi64(a: __m512i, k: __mmask8, b: __m512i, c: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shrdv_epi64(a: __m512i, k: __mmask8, b: __m512i, c: __m512i) -> __m512i { unsafe { let shf = _mm512_shrdv_epi64(a, b, c).as_i64x8(); transmute(simd_select_bitmask(k, shf, a.as_i64x8())) @@ -935,7 +964,8 @@ pub fn _mm512_mask_shrdv_epi64(a: __m512i, k: __mmask8, b: __m512i, c: __m512i) #[target_feature(enable = "avx512vbmi2")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvq))] -pub fn _mm512_maskz_shrdv_epi64(k: __mmask8, a: __m512i, b: __m512i, c: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shrdv_epi64(k: __mmask8, a: __m512i, b: __m512i, c: __m512i) -> __m512i { unsafe { let shf = _mm512_shrdv_epi64(a, b, c).as_i64x8(); transmute(simd_select_bitmask(k, shf, i64x8::ZERO)) @@ -949,7 +979,8 @@ pub fn _mm512_maskz_shrdv_epi64(k: __mmask8, a: __m512i, b: __m512i, c: __m512i) #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvq))] -pub fn _mm256_shrdv_epi64(a: __m256i, b: __m256i, c: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shrdv_epi64(a: __m256i, b: __m256i, c: __m256i) -> __m256i { unsafe { transmute(simd_funnel_shr( b.as_i64x4(), @@ -966,7 +997,8 @@ pub fn _mm256_shrdv_epi64(a: __m256i, b: __m256i, c: __m256i) -> __m256i { #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvq))] -pub fn _mm256_mask_shrdv_epi64(a: __m256i, k: __mmask8, b: __m256i, c: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shrdv_epi64(a: __m256i, k: __mmask8, b: __m256i, c: __m256i) -> __m256i { unsafe { let shf = _mm256_shrdv_epi64(a, b, c).as_i64x4(); transmute(simd_select_bitmask(k, shf, a.as_i64x4())) @@ -980,7 +1012,8 @@ pub fn _mm256_mask_shrdv_epi64(a: __m256i, k: __mmask8, b: __m256i, c: __m256i) #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvq))] -pub fn _mm256_maskz_shrdv_epi64(k: __mmask8, a: __m256i, b: __m256i, c: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shrdv_epi64(k: __mmask8, a: __m256i, b: __m256i, c: __m256i) -> __m256i { unsafe { let shf = _mm256_shrdv_epi64(a, b, c).as_i64x4(); transmute(simd_select_bitmask(k, shf, i64x4::ZERO)) @@ -994,7 +1027,8 @@ pub fn _mm256_maskz_shrdv_epi64(k: __mmask8, a: __m256i, b: __m256i, c: __m256i) #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvq))] -pub fn _mm_shrdv_epi64(a: __m128i, b: __m128i, c: 
__m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_shrdv_epi64(a: __m128i, b: __m128i, c: __m128i) -> __m128i { unsafe { transmute(simd_funnel_shr( b.as_i64x2(), @@ -1011,7 +1045,8 @@ pub fn _mm_shrdv_epi64(a: __m128i, b: __m128i, c: __m128i) -> __m128i { #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvq))] -pub fn _mm_mask_shrdv_epi64(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_shrdv_epi64(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> __m128i { unsafe { let shf = _mm_shrdv_epi64(a, b, c).as_i64x2(); transmute(simd_select_bitmask(k, shf, a.as_i64x2())) @@ -1025,7 +1060,8 @@ pub fn _mm_mask_shrdv_epi64(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvq))] -pub fn _mm_maskz_shrdv_epi64(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_shrdv_epi64(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> __m128i { unsafe { let shf = _mm_shrdv_epi64(a, b, c).as_i64x2(); transmute(simd_select_bitmask(k, shf, i64x2::ZERO)) @@ -1039,7 +1075,8 @@ pub fn _mm_maskz_shrdv_epi64(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> #[target_feature(enable = "avx512vbmi2")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvd))] -pub fn _mm512_shrdv_epi32(a: __m512i, b: __m512i, c: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shrdv_epi32(a: __m512i, b: __m512i, c: __m512i) -> __m512i { unsafe { transmute(simd_funnel_shr( b.as_i32x16(), @@ -1056,7 +1093,8 @@ pub fn _mm512_shrdv_epi32(a: __m512i, b: __m512i, c: __m512i) -> __m512i { #[target_feature(enable = "avx512vbmi2")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvd))] -pub fn _mm512_mask_shrdv_epi32(a: __m512i, k: __mmask16, b: __m512i, c: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shrdv_epi32(a: __m512i, k: __mmask16, b: __m512i, c: __m512i) -> __m512i { unsafe { let shf = _mm512_shrdv_epi32(a, b, c).as_i32x16(); transmute(simd_select_bitmask(k, shf, a.as_i32x16())) @@ -1070,7 +1108,8 @@ pub fn _mm512_mask_shrdv_epi32(a: __m512i, k: __mmask16, b: __m512i, c: __m512i) #[target_feature(enable = "avx512vbmi2")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvd))] -pub fn _mm512_maskz_shrdv_epi32(k: __mmask16, a: __m512i, b: __m512i, c: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shrdv_epi32(k: __mmask16, a: __m512i, b: __m512i, c: __m512i) -> __m512i { unsafe { let shf = _mm512_shrdv_epi32(a, b, c).as_i32x16(); transmute(simd_select_bitmask(k, shf, i32x16::ZERO)) @@ -1084,7 +1123,8 @@ pub fn _mm512_maskz_shrdv_epi32(k: __mmask16, a: __m512i, b: __m512i, c: __m512i #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvd))] -pub fn _mm256_shrdv_epi32(a: __m256i, b: __m256i, c: 
__m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shrdv_epi32(a: __m256i, b: __m256i, c: __m256i) -> __m256i { unsafe { transmute(simd_funnel_shr( b.as_i32x8(), @@ -1101,7 +1141,8 @@ pub fn _mm256_shrdv_epi32(a: __m256i, b: __m256i, c: __m256i) -> __m256i { #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvd))] -pub fn _mm256_mask_shrdv_epi32(a: __m256i, k: __mmask8, b: __m256i, c: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shrdv_epi32(a: __m256i, k: __mmask8, b: __m256i, c: __m256i) -> __m256i { unsafe { let shf = _mm256_shrdv_epi32(a, b, c).as_i32x8(); transmute(simd_select_bitmask(k, shf, a.as_i32x8())) @@ -1115,7 +1156,8 @@ pub fn _mm256_mask_shrdv_epi32(a: __m256i, k: __mmask8, b: __m256i, c: __m256i) #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvd))] -pub fn _mm256_maskz_shrdv_epi32(k: __mmask8, a: __m256i, b: __m256i, c: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shrdv_epi32(k: __mmask8, a: __m256i, b: __m256i, c: __m256i) -> __m256i { unsafe { let shf = _mm256_shrdv_epi32(a, b, c).as_i32x8(); transmute(simd_select_bitmask(k, shf, i32x8::ZERO)) @@ -1129,7 +1171,8 @@ pub fn _mm256_maskz_shrdv_epi32(k: __mmask8, a: __m256i, b: __m256i, c: __m256i) #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvd))] -pub fn _mm_shrdv_epi32(a: __m128i, b: __m128i, c: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_shrdv_epi32(a: __m128i, b: __m128i, c: __m128i) -> __m128i { unsafe { transmute(simd_funnel_shr( b.as_i32x4(), @@ -1146,7 +1189,8 @@ pub fn _mm_shrdv_epi32(a: __m128i, b: __m128i, c: __m128i) -> __m128i { #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvd))] -pub fn _mm_mask_shrdv_epi32(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_shrdv_epi32(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> __m128i { unsafe { let shf = _mm_shrdv_epi32(a, b, c).as_i32x4(); transmute(simd_select_bitmask(k, shf, a.as_i32x4())) @@ -1160,7 +1204,8 @@ pub fn _mm_mask_shrdv_epi32(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvd))] -pub fn _mm_maskz_shrdv_epi32(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_shrdv_epi32(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> __m128i { unsafe { let shf = _mm_shrdv_epi32(a, b, c).as_i32x4(); transmute(simd_select_bitmask(k, shf, i32x4::ZERO)) @@ -1174,7 +1219,8 @@ pub fn _mm_maskz_shrdv_epi32(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> #[target_feature(enable = "avx512vbmi2")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvw))] -pub fn _mm512_shrdv_epi16(a: __m512i, b: 
__m512i, c: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shrdv_epi16(a: __m512i, b: __m512i, c: __m512i) -> __m512i { unsafe { transmute(simd_funnel_shr( b.as_i16x32(), @@ -1191,7 +1237,8 @@ pub fn _mm512_shrdv_epi16(a: __m512i, b: __m512i, c: __m512i) -> __m512i { #[target_feature(enable = "avx512vbmi2")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvw))] -pub fn _mm512_mask_shrdv_epi16(a: __m512i, k: __mmask32, b: __m512i, c: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shrdv_epi16(a: __m512i, k: __mmask32, b: __m512i, c: __m512i) -> __m512i { unsafe { let shf = _mm512_shrdv_epi16(a, b, c).as_i16x32(); transmute(simd_select_bitmask(k, shf, a.as_i16x32())) @@ -1205,7 +1252,8 @@ pub fn _mm512_mask_shrdv_epi16(a: __m512i, k: __mmask32, b: __m512i, c: __m512i) #[target_feature(enable = "avx512vbmi2")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvw))] -pub fn _mm512_maskz_shrdv_epi16(k: __mmask32, a: __m512i, b: __m512i, c: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shrdv_epi16(k: __mmask32, a: __m512i, b: __m512i, c: __m512i) -> __m512i { unsafe { let shf = _mm512_shrdv_epi16(a, b, c).as_i16x32(); transmute(simd_select_bitmask(k, shf, i16x32::ZERO)) @@ -1219,7 +1267,8 @@ pub fn _mm512_maskz_shrdv_epi16(k: __mmask32, a: __m512i, b: __m512i, c: __m512i #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvw))] -pub fn _mm256_shrdv_epi16(a: __m256i, b: __m256i, c: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shrdv_epi16(a: __m256i, b: __m256i, c: __m256i) -> __m256i { unsafe { transmute(simd_funnel_shr( b.as_i16x16(), @@ -1236,7 +1285,8 @@ pub fn _mm256_shrdv_epi16(a: __m256i, b: __m256i, c: __m256i) -> __m256i { #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvw))] -pub fn _mm256_mask_shrdv_epi16(a: __m256i, k: __mmask16, b: __m256i, c: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shrdv_epi16(a: __m256i, k: __mmask16, b: __m256i, c: __m256i) -> __m256i { unsafe { let shf = _mm256_shrdv_epi16(a, b, c).as_i16x16(); transmute(simd_select_bitmask(k, shf, a.as_i16x16())) @@ -1250,7 +1300,8 @@ pub fn _mm256_mask_shrdv_epi16(a: __m256i, k: __mmask16, b: __m256i, c: __m256i) #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvw))] -pub fn _mm256_maskz_shrdv_epi16(k: __mmask16, a: __m256i, b: __m256i, c: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shrdv_epi16(k: __mmask16, a: __m256i, b: __m256i, c: __m256i) -> __m256i { unsafe { let shf = _mm256_shrdv_epi16(a, b, c).as_i16x16(); transmute(simd_select_bitmask(k, shf, i16x16::ZERO)) @@ -1264,7 +1315,8 @@ pub fn _mm256_maskz_shrdv_epi16(k: __mmask16, a: __m256i, b: __m256i, c: __m256i #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, 
assert_instr(vpshrdvw))] -pub fn _mm_shrdv_epi16(a: __m128i, b: __m128i, c: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_shrdv_epi16(a: __m128i, b: __m128i, c: __m128i) -> __m128i { unsafe { transmute(simd_funnel_shr( b.as_i16x8(), @@ -1281,7 +1333,8 @@ pub fn _mm_shrdv_epi16(a: __m128i, b: __m128i, c: __m128i) -> __m128i { #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvw))] -pub fn _mm_mask_shrdv_epi16(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_shrdv_epi16(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> __m128i { unsafe { let shf = _mm_shrdv_epi16(a, b, c).as_i16x8(); transmute(simd_select_bitmask(k, shf, a.as_i16x8())) @@ -1295,7 +1348,8 @@ pub fn _mm_mask_shrdv_epi16(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> #[target_feature(enable = "avx512vbmi2,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshrdvw))] -pub fn _mm_maskz_shrdv_epi16(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_shrdv_epi16(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> __m128i { unsafe { let shf = _mm_shrdv_epi16(a, b, c).as_i16x8(); transmute(simd_select_bitmask(k, shf, i16x8::ZERO)) @@ -1310,7 +1364,8 @@ pub fn _mm_maskz_shrdv_epi16(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_shldi_epi64<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shldi_epi64<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i { static_assert_uimm_bits!(IMM8, 8); _mm512_shldv_epi64(a, b, _mm512_set1_epi64(IMM8 as i64)) } @@ -1323,7 +1378,8 @@ pub fn _mm512_shldi_epi64(a: __m512i, b: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(4)] -pub fn _mm512_mask_shldi_epi64<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shldi_epi64<const IMM8: i32>( src: __m512i, k: __mmask8, a: __m512i, @@ -1344,7 +1400,12 @@ pub fn _mm512_mask_shldi_epi64( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_shldi_epi64<const IMM8: i32>(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shldi_epi64<const IMM8: i32>( + k: __mmask8, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm512_shldi_epi64::<IMM8>(a, b).as_i64x8(); @@ -1360,7 +1421,8 @@ pub fn _mm512_maskz_shldi_epi64(k: __mmask8, a: __m512i, b: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_shldi_epi64<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shldi_epi64<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_shldv_epi64(a, b,
_mm256_set1_epi64x(IMM8 as i64)) } @@ -1373,7 +1435,8 @@ pub fn _mm256_shldi_epi64(a: __m256i, b: __m256i) -> __m256i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(4)] -pub fn _mm256_mask_shldi_epi64<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shldi_epi64<const IMM8: i32>( src: __m256i, k: __mmask8, a: __m256i, @@ -1394,7 +1457,12 @@ pub fn _mm256_mask_shldi_epi64( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_maskz_shldi_epi64<const IMM8: i32>(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shldi_epi64<const IMM8: i32>( + k: __mmask8, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm256_shldi_epi64::<IMM8>(a, b).as_i64x4(); @@ -1410,7 +1478,8 @@ pub fn _mm256_maskz_shldi_epi64(k: __mmask8, a: __m256i, b: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm_shldi_epi64<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_shldi_epi64<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); _mm_shldv_epi64(a, b, _mm_set1_epi64x(IMM8 as i64)) } @@ -1423,7 +1492,8 @@ pub fn _mm_shldi_epi64(a: __m128i, b: __m128i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(4)] -pub fn _mm_mask_shldi_epi64<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_shldi_epi64<const IMM8: i32>( src: __m128i, k: __mmask8, a: __m128i, @@ -1444,7 +1514,12 @@ pub fn _mm_mask_shldi_epi64( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm_maskz_shldi_epi64<const IMM8: i32>(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_shldi_epi64<const IMM8: i32>( + k: __mmask8, + a: __m128i, + b: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm_shldi_epi64::<IMM8>(a, b).as_i64x2(); @@ -1460,7 +1535,8 @@ pub fn _mm_maskz_shldi_epi64(k: __mmask8, a: __m128i, b: __m128 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm512_shldi_epi32<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shldi_epi32<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i { static_assert_uimm_bits!(IMM8, 8); _mm512_shldv_epi32(a, b, _mm512_set1_epi32(IMM8)) } @@ -1473,7 +1549,8 @@ pub fn _mm512_shldi_epi32(a: __m512i, b: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(4)] -pub fn _mm512_mask_shldi_epi32<const IMM8: i32>( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shldi_epi32<const IMM8: i32>( src: __m512i, k: __mmask16, a: __m512i, @@ -1494,7 +1571,12 @@ pub fn _mm512_mask_shldi_epi32( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 =
5))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_shldi_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shldi_epi32( + k: __mmask16, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm512_shldi_epi32::(a, b).as_i32x16(); @@ -1510,7 +1592,8 @@ pub fn _mm512_maskz_shldi_epi32(k: __mmask16, a: __m512i, b: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_shldi_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shldi_epi32(a: __m256i, b: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_shldv_epi32(a, b, _mm256_set1_epi32(IMM8)) } @@ -1523,7 +1606,8 @@ pub fn _mm256_shldi_epi32(a: __m256i, b: __m256i) -> __m256i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(4)] -pub fn _mm256_mask_shldi_epi32( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shldi_epi32( src: __m256i, k: __mmask8, a: __m256i, @@ -1544,7 +1628,12 @@ pub fn _mm256_mask_shldi_epi32( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_maskz_shldi_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shldi_epi32( + k: __mmask8, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm256_shldi_epi32::(a, b).as_i32x8(); @@ -1560,7 +1649,8 @@ pub fn _mm256_maskz_shldi_epi32(k: __mmask8, a: __m256i, b: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm_shldi_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_shldi_epi32(a: __m128i, b: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); _mm_shldv_epi32(a, b, _mm_set1_epi32(IMM8)) } @@ -1573,7 +1663,8 @@ pub fn _mm_shldi_epi32(a: __m128i, b: __m128i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(4)] -pub fn _mm_mask_shldi_epi32( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_shldi_epi32( src: __m128i, k: __mmask8, a: __m128i, @@ -1594,7 +1685,12 @@ pub fn _mm_mask_shldi_epi32( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm_maskz_shldi_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_shldi_epi32( + k: __mmask8, + a: __m128i, + b: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm_shldi_epi32::(a, b).as_i32x4(); @@ -1610,7 +1706,8 @@ pub fn _mm_maskz_shldi_epi32(k: __mmask8, a: __m128i, b: __m128 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] 
-pub fn _mm512_shldi_epi16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shldi_epi16(a: __m512i, b: __m512i) -> __m512i { static_assert_uimm_bits!(IMM8, 8); _mm512_shldv_epi16(a, b, _mm512_set1_epi16(IMM8 as i16)) } @@ -1623,7 +1720,8 @@ pub fn _mm512_shldi_epi16(a: __m512i, b: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(4)] -pub fn _mm512_mask_shldi_epi16( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shldi_epi16( src: __m512i, k: __mmask32, a: __m512i, @@ -1644,7 +1742,12 @@ pub fn _mm512_mask_shldi_epi16( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_shldi_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shldi_epi16( + k: __mmask32, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm512_shldi_epi16::(a, b).as_i16x32(); @@ -1660,7 +1763,8 @@ pub fn _mm512_maskz_shldi_epi16(k: __mmask32, a: __m512i, b: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm256_shldi_epi16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shldi_epi16(a: __m256i, b: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_shldv_epi16(a, b, _mm256_set1_epi16(IMM8 as i16)) } @@ -1673,7 +1777,8 @@ pub fn _mm256_shldi_epi16(a: __m256i, b: __m256i) -> __m256i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(4)] -pub fn _mm256_mask_shldi_epi16( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shldi_epi16( src: __m256i, k: __mmask16, a: __m256i, @@ -1694,7 +1799,12 @@ pub fn _mm256_mask_shldi_epi16( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm256_maskz_shldi_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shldi_epi16( + k: __mmask16, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm256_shldi_epi16::(a, b).as_i16x16(); @@ -1710,7 +1820,8 @@ pub fn _mm256_maskz_shldi_epi16(k: __mmask16, a: __m256i, b: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] -pub fn _mm_shldi_epi16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_shldi_epi16(a: __m128i, b: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); _mm_shldv_epi16(a, b, _mm_set1_epi16(IMM8 as i16)) } @@ -1723,7 +1834,8 @@ pub fn _mm_shldi_epi16(a: __m128i, b: __m128i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(4)] -pub fn _mm_mask_shldi_epi16( 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_shldi_epi16( src: __m128i, k: __mmask8, a: __m128i, @@ -1744,7 +1856,12 @@ pub fn _mm_mask_shldi_epi16( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] -pub fn _mm_maskz_shldi_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_shldi_epi16( + k: __mmask8, + a: __m128i, + b: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm_shldi_epi16::(a, b).as_i16x8(); @@ -1760,7 +1877,8 @@ pub fn _mm_maskz_shldi_epi16(k: __mmask8, a: __m128i, b: __m128 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(2)] -pub fn _mm512_shrdi_epi64(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shrdi_epi64(a: __m512i, b: __m512i) -> __m512i { static_assert_uimm_bits!(IMM8, 8); _mm512_shrdv_epi64(a, b, _mm512_set1_epi64(IMM8 as i64)) } @@ -1773,7 +1891,8 @@ pub fn _mm512_shrdi_epi64(a: __m512i, b: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(4)] -pub fn _mm512_mask_shrdi_epi64( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shrdi_epi64( src: __m512i, k: __mmask8, a: __m512i, @@ -1794,7 +1913,12 @@ pub fn _mm512_mask_shrdi_epi64( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 255))] //should be vpshrdq #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_shrdi_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shrdi_epi64( + k: __mmask8, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm512_shrdi_epi64::(a, b).as_i64x8(); @@ -1810,7 +1934,8 @@ pub fn _mm512_maskz_shrdi_epi64(k: __mmask8, a: __m512i, b: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(2)] -pub fn _mm256_shrdi_epi64(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shrdi_epi64(a: __m256i, b: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_shrdv_epi64(a, b, _mm256_set1_epi64x(IMM8 as i64)) } @@ -1823,7 +1948,8 @@ pub fn _mm256_shrdi_epi64(a: __m256i, b: __m256i) -> __m256i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(4)] -pub fn _mm256_mask_shrdi_epi64( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shrdi_epi64( src: __m256i, k: __mmask8, a: __m256i, @@ -1844,7 +1970,12 @@ pub fn _mm256_mask_shrdi_epi64( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(3)] -pub fn _mm256_maskz_shrdi_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature 
= "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shrdi_epi64( + k: __mmask8, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm256_shrdi_epi64::(a, b).as_i64x4(); @@ -1860,7 +1991,8 @@ pub fn _mm256_maskz_shrdi_epi64(k: __mmask8, a: __m256i, b: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(2)] -pub fn _mm_shrdi_epi64(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_shrdi_epi64(a: __m128i, b: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); _mm_shrdv_epi64(a, b, _mm_set1_epi64x(IMM8 as i64)) } @@ -1873,7 +2005,8 @@ pub fn _mm_shrdi_epi64(a: __m128i, b: __m128i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(4)] -pub fn _mm_mask_shrdi_epi64( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_shrdi_epi64( src: __m128i, k: __mmask8, a: __m128i, @@ -1894,7 +2027,12 @@ pub fn _mm_mask_shrdi_epi64( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(3)] -pub fn _mm_maskz_shrdi_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_shrdi_epi64( + k: __mmask8, + a: __m128i, + b: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm_shrdi_epi64::(a, b).as_i64x2(); @@ -1910,7 +2048,8 @@ pub fn _mm_maskz_shrdi_epi64(k: __mmask8, a: __m128i, b: __m128 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(2)] -pub fn _mm512_shrdi_epi32(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shrdi_epi32(a: __m512i, b: __m512i) -> __m512i { static_assert_uimm_bits!(IMM8, 8); _mm512_shrdv_epi32(a, b, _mm512_set1_epi32(IMM8)) } @@ -1923,7 +2062,8 @@ pub fn _mm512_shrdi_epi32(a: __m512i, b: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(4)] -pub fn _mm512_mask_shrdi_epi32( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shrdi_epi32( src: __m512i, k: __mmask16, a: __m512i, @@ -1944,7 +2084,12 @@ pub fn _mm512_mask_shrdi_epi32( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_shrdi_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shrdi_epi32( + k: __mmask16, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm512_shrdi_epi32::(a, b).as_i32x16(); @@ -1960,7 +2105,8 @@ pub fn _mm512_maskz_shrdi_epi32(k: __mmask16, a: __m512i, b: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(2)] -pub 
fn _mm256_shrdi_epi32(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shrdi_epi32(a: __m256i, b: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_shrdv_epi32(a, b, _mm256_set1_epi32(IMM8)) } @@ -1973,7 +2119,8 @@ pub fn _mm256_shrdi_epi32(a: __m256i, b: __m256i) -> __m256i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(4)] -pub fn _mm256_mask_shrdi_epi32( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shrdi_epi32( src: __m256i, k: __mmask8, a: __m256i, @@ -1994,7 +2141,12 @@ pub fn _mm256_mask_shrdi_epi32( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(3)] -pub fn _mm256_maskz_shrdi_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shrdi_epi32( + k: __mmask8, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm256_shrdi_epi32::(a, b).as_i32x8(); @@ -2010,7 +2162,8 @@ pub fn _mm256_maskz_shrdi_epi32(k: __mmask8, a: __m256i, b: __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(2)] -pub fn _mm_shrdi_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_shrdi_epi32(a: __m128i, b: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); _mm_shrdv_epi32(a, b, _mm_set1_epi32(IMM8)) } @@ -2023,7 +2176,8 @@ pub fn _mm_shrdi_epi32(a: __m128i, b: __m128i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(4)] -pub fn _mm_mask_shrdi_epi32( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_shrdi_epi32( src: __m128i, k: __mmask8, a: __m128i, @@ -2044,7 +2198,12 @@ pub fn _mm_mask_shrdi_epi32( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(3)] -pub fn _mm_maskz_shrdi_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_shrdi_epi32( + k: __mmask8, + a: __m128i, + b: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm_shrdi_epi32::(a, b).as_i32x4(); @@ -2060,7 +2219,8 @@ pub fn _mm_maskz_shrdi_epi32(k: __mmask8, a: __m128i, b: __m128 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(2)] -pub fn _mm512_shrdi_epi16(a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_shrdi_epi16(a: __m512i, b: __m512i) -> __m512i { static_assert_uimm_bits!(IMM8, 8); _mm512_shrdv_epi16(a, b, _mm512_set1_epi16(IMM8 as i16)) } @@ -2073,7 +2233,8 @@ pub fn _mm512_shrdi_epi16(a: __m512i, b: __m512i) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should 
be vpshrdw #[rustc_legacy_const_generics(4)] -pub fn _mm512_mask_shrdi_epi16( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_shrdi_epi16( src: __m512i, k: __mmask32, a: __m512i, @@ -2094,7 +2255,12 @@ pub fn _mm512_mask_shrdi_epi16( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(3)] -pub fn _mm512_maskz_shrdi_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_shrdi_epi16( + k: __mmask32, + a: __m512i, + b: __m512i, +) -> __m512i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm512_shrdi_epi16::(a, b).as_i16x32(); @@ -2110,7 +2276,8 @@ pub fn _mm512_maskz_shrdi_epi16(k: __mmask32, a: __m512i, b: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(2)] -pub fn _mm256_shrdi_epi16(a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_shrdi_epi16(a: __m256i, b: __m256i) -> __m256i { static_assert_uimm_bits!(IMM8, 8); _mm256_shrdv_epi16(a, b, _mm256_set1_epi16(IMM8 as i16)) } @@ -2123,7 +2290,8 @@ pub fn _mm256_shrdi_epi16(a: __m256i, b: __m256i) -> __m256i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(4)] -pub fn _mm256_mask_shrdi_epi16( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_shrdi_epi16( src: __m256i, k: __mmask16, a: __m256i, @@ -2144,7 +2312,12 @@ pub fn _mm256_mask_shrdi_epi16( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(3)] -pub fn _mm256_maskz_shrdi_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_shrdi_epi16( + k: __mmask16, + a: __m256i, + b: __m256i, +) -> __m256i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm256_shrdi_epi16::(a, b).as_i16x16(); @@ -2160,7 +2333,8 @@ pub fn _mm256_maskz_shrdi_epi16(k: __mmask16, a: __m256i, b: __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(2)] -pub fn _mm_shrdi_epi16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_shrdi_epi16(a: __m128i, b: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); _mm_shrdv_epi16(a, b, _mm_set1_epi16(IMM8 as i16)) } @@ -2173,7 +2347,8 @@ pub fn _mm_shrdi_epi16(a: __m128i, b: __m128i) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(4)] -pub fn _mm_mask_shrdi_epi16( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_shrdi_epi16( src: __m128i, k: __mmask8, a: __m128i, @@ -2194,7 +2369,12 @@ pub fn _mm_mask_shrdi_epi16( #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(3)] -pub fn 
_mm_maskz_shrdi_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_shrdi_epi16( + k: __mmask8, + a: __m128i, + b: __m128i, +) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); let shf = _mm_shrdi_epi16::(a, b).as_i16x8(); @@ -2262,6 +2442,7 @@ unsafe extern "C" { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; @@ -2581,7 +2762,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_shldv_epi64() { + const unsafe fn test_mm512_shldv_epi64() { let a = _mm512_set1_epi64(1); let b = _mm512_set1_epi64(1 << 63); let c = _mm512_set1_epi64(2); @@ -2591,7 +2772,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_mask_shldv_epi64() { + const unsafe fn test_mm512_mask_shldv_epi64() { let a = _mm512_set1_epi64(1); let b = _mm512_set1_epi64(1 << 63); let c = _mm512_set1_epi64(2); @@ -2603,7 +2784,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_maskz_shldv_epi64() { + const unsafe fn test_mm512_maskz_shldv_epi64() { let a = _mm512_set1_epi64(1); let b = _mm512_set1_epi64(1 << 63); let c = _mm512_set1_epi64(2); @@ -2615,7 +2796,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_shldv_epi64() { + const unsafe fn test_mm256_shldv_epi64() { let a = _mm256_set1_epi64x(1); let b = _mm256_set1_epi64x(1 << 63); let c = _mm256_set1_epi64x(2); @@ -2625,7 +2806,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_mask_shldv_epi64() { + const unsafe fn test_mm256_mask_shldv_epi64() { let a = _mm256_set1_epi64x(1); let b = _mm256_set1_epi64x(1 << 63); let c = _mm256_set1_epi64x(2); @@ -2637,7 +2818,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_maskz_shldv_epi64() { + const unsafe fn test_mm256_maskz_shldv_epi64() { let a = _mm256_set1_epi64x(1); let b = _mm256_set1_epi64x(1 << 63); let c = _mm256_set1_epi64x(2); @@ -2649,7 +2830,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_shldv_epi64() { + const unsafe fn test_mm_shldv_epi64() { let a = _mm_set1_epi64x(1); let b = _mm_set1_epi64x(1 << 63); let c = _mm_set1_epi64x(2); @@ -2659,7 +2840,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_mask_shldv_epi64() { + const unsafe fn test_mm_mask_shldv_epi64() { let a = _mm_set1_epi64x(1); let b = _mm_set1_epi64x(1 << 63); let c = _mm_set1_epi64x(2); @@ -2671,7 +2852,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_maskz_shldv_epi64() { + const unsafe fn test_mm_maskz_shldv_epi64() { let a = _mm_set1_epi64x(1); let b = _mm_set1_epi64x(1 << 63); let c = _mm_set1_epi64x(2); @@ -2683,7 +2864,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_shldv_epi32() { + const unsafe fn test_mm512_shldv_epi32() { let a = _mm512_set1_epi32(1); let b = _mm512_set1_epi32(1 << 31); let c = _mm512_set1_epi32(2); @@ -2693,7 +2874,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_mask_shldv_epi32() { + const unsafe fn test_mm512_mask_shldv_epi32() { let a = _mm512_set1_epi32(1); let b = _mm512_set1_epi32(1 << 31); let c = _mm512_set1_epi32(2); @@ -2705,7 +2886,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_maskz_shldv_epi32() { + const unsafe fn test_mm512_maskz_shldv_epi32() { let a = 
_mm512_set1_epi32(1); let b = _mm512_set1_epi32(1 << 31); let c = _mm512_set1_epi32(2); @@ -2717,7 +2898,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_shldv_epi32() { + const unsafe fn test_mm256_shldv_epi32() { let a = _mm256_set1_epi32(1); let b = _mm256_set1_epi32(1 << 31); let c = _mm256_set1_epi32(2); @@ -2727,7 +2908,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_mask_shldv_epi32() { + const unsafe fn test_mm256_mask_shldv_epi32() { let a = _mm256_set1_epi32(1); let b = _mm256_set1_epi32(1 << 31); let c = _mm256_set1_epi32(2); @@ -2739,7 +2920,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_maskz_shldv_epi32() { + const unsafe fn test_mm256_maskz_shldv_epi32() { let a = _mm256_set1_epi32(1); let b = _mm256_set1_epi32(1 << 31); let c = _mm256_set1_epi32(2); @@ -2751,7 +2932,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_shldv_epi32() { + const unsafe fn test_mm_shldv_epi32() { let a = _mm_set1_epi32(1); let b = _mm_set1_epi32(1 << 31); let c = _mm_set1_epi32(2); @@ -2761,7 +2942,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_mask_shldv_epi32() { + const unsafe fn test_mm_mask_shldv_epi32() { let a = _mm_set1_epi32(1); let b = _mm_set1_epi32(1 << 31); let c = _mm_set1_epi32(2); @@ -2773,7 +2954,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_maskz_shldv_epi32() { + const unsafe fn test_mm_maskz_shldv_epi32() { let a = _mm_set1_epi32(1); let b = _mm_set1_epi32(1 << 31); let c = _mm_set1_epi32(2); @@ -2785,7 +2966,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_shldv_epi16() { + const unsafe fn test_mm512_shldv_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1 << 15); let c = _mm512_set1_epi16(2); @@ -2795,7 +2976,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_mask_shldv_epi16() { + const unsafe fn test_mm512_mask_shldv_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1 << 15); let c = _mm512_set1_epi16(2); @@ -2807,7 +2988,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_maskz_shldv_epi16() { + const unsafe fn test_mm512_maskz_shldv_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1 << 15); let c = _mm512_set1_epi16(2); @@ -2819,7 +3000,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_shldv_epi16() { + const unsafe fn test_mm256_shldv_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1 << 15); let c = _mm256_set1_epi16(2); @@ -2829,7 +3010,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_mask_shldv_epi16() { + const unsafe fn test_mm256_mask_shldv_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1 << 15); let c = _mm256_set1_epi16(2); @@ -2841,7 +3022,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_maskz_shldv_epi16() { + const unsafe fn test_mm256_maskz_shldv_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1 << 15); let c = _mm256_set1_epi16(2); @@ -2853,7 +3034,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_shldv_epi16() { + const unsafe fn test_mm_shldv_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1 << 15); let c = _mm_set1_epi16(2); @@ -2863,7 +3044,7 @@ mod tests { } #[simd_test(enable = 
"avx512vbmi2,avx512vl")] - unsafe fn test_mm_mask_shldv_epi16() { + const unsafe fn test_mm_mask_shldv_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1 << 15); let c = _mm_set1_epi16(2); @@ -2875,7 +3056,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_maskz_shldv_epi16() { + const unsafe fn test_mm_maskz_shldv_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1 << 15); let c = _mm_set1_epi16(2); @@ -2887,7 +3068,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_shrdv_epi64() { + const unsafe fn test_mm512_shrdv_epi64() { let a = _mm512_set1_epi64(2); let b = _mm512_set1_epi64(8); let c = _mm512_set1_epi64(1); @@ -2897,7 +3078,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_mask_shrdv_epi64() { + const unsafe fn test_mm512_mask_shrdv_epi64() { let a = _mm512_set1_epi64(2); let b = _mm512_set1_epi64(8); let c = _mm512_set1_epi64(1); @@ -2909,7 +3090,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_maskz_shrdv_epi64() { + const unsafe fn test_mm512_maskz_shrdv_epi64() { let a = _mm512_set1_epi64(2); let b = _mm512_set1_epi64(8); let c = _mm512_set1_epi64(1); @@ -2921,7 +3102,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_shrdv_epi64() { + const unsafe fn test_mm256_shrdv_epi64() { let a = _mm256_set1_epi64x(2); let b = _mm256_set1_epi64x(8); let c = _mm256_set1_epi64x(1); @@ -2931,7 +3112,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_mask_shrdv_epi64() { + const unsafe fn test_mm256_mask_shrdv_epi64() { let a = _mm256_set1_epi64x(2); let b = _mm256_set1_epi64x(8); let c = _mm256_set1_epi64x(1); @@ -2943,7 +3124,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_maskz_shrdv_epi64() { + const unsafe fn test_mm256_maskz_shrdv_epi64() { let a = _mm256_set1_epi64x(2); let b = _mm256_set1_epi64x(8); let c = _mm256_set1_epi64x(1); @@ -2955,7 +3136,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_shrdv_epi64() { + const unsafe fn test_mm_shrdv_epi64() { let a = _mm_set1_epi64x(2); let b = _mm_set1_epi64x(8); let c = _mm_set1_epi64x(1); @@ -2965,7 +3146,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_mask_shrdv_epi64() { + const unsafe fn test_mm_mask_shrdv_epi64() { let a = _mm_set1_epi64x(2); let b = _mm_set1_epi64x(8); let c = _mm_set1_epi64x(1); @@ -2977,7 +3158,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_maskz_shrdv_epi64() { + const unsafe fn test_mm_maskz_shrdv_epi64() { let a = _mm_set1_epi64x(2); let b = _mm_set1_epi64x(8); let c = _mm_set1_epi64x(1); @@ -2989,7 +3170,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_shrdv_epi32() { + const unsafe fn test_mm512_shrdv_epi32() { let a = _mm512_set1_epi32(2); let b = _mm512_set1_epi32(8); let c = _mm512_set1_epi32(1); @@ -2999,7 +3180,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_mask_shrdv_epi32() { + const unsafe fn test_mm512_mask_shrdv_epi32() { let a = _mm512_set1_epi32(2); let b = _mm512_set1_epi32(8); let c = _mm512_set1_epi32(1); @@ -3011,7 +3192,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_maskz_shrdv_epi32() { + const unsafe fn test_mm512_maskz_shrdv_epi32() { let a = _mm512_set1_epi32(2); let b = _mm512_set1_epi32(8); let c = _mm512_set1_epi32(1); @@ -3023,7 
+3204,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_shrdv_epi32() { + const unsafe fn test_mm256_shrdv_epi32() { let a = _mm256_set1_epi32(2); let b = _mm256_set1_epi32(8); let c = _mm256_set1_epi32(1); @@ -3033,7 +3214,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_mask_shrdv_epi32() { + const unsafe fn test_mm256_mask_shrdv_epi32() { let a = _mm256_set1_epi32(2); let b = _mm256_set1_epi32(8); let c = _mm256_set1_epi32(1); @@ -3045,7 +3226,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_maskz_shrdv_epi32() { + const unsafe fn test_mm256_maskz_shrdv_epi32() { let a = _mm256_set1_epi32(2); let b = _mm256_set1_epi32(8); let c = _mm256_set1_epi32(1); @@ -3057,7 +3238,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_shrdv_epi32() { + const unsafe fn test_mm_shrdv_epi32() { let a = _mm_set1_epi32(2); let b = _mm_set1_epi32(8); let c = _mm_set1_epi32(1); @@ -3067,7 +3248,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_mask_shrdv_epi32() { + const unsafe fn test_mm_mask_shrdv_epi32() { let a = _mm_set1_epi32(2); let b = _mm_set1_epi32(8); let c = _mm_set1_epi32(1); @@ -3079,7 +3260,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_maskz_shrdv_epi32() { + const unsafe fn test_mm_maskz_shrdv_epi32() { let a = _mm_set1_epi32(2); let b = _mm_set1_epi32(8); let c = _mm_set1_epi32(1); @@ -3091,7 +3272,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_shrdv_epi16() { + const unsafe fn test_mm512_shrdv_epi16() { let a = _mm512_set1_epi16(2); let b = _mm512_set1_epi16(8); let c = _mm512_set1_epi16(1); @@ -3101,7 +3282,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_mask_shrdv_epi16() { + const unsafe fn test_mm512_mask_shrdv_epi16() { let a = _mm512_set1_epi16(2); let b = _mm512_set1_epi16(8); let c = _mm512_set1_epi16(1); @@ -3113,7 +3294,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_maskz_shrdv_epi16() { + const unsafe fn test_mm512_maskz_shrdv_epi16() { let a = _mm512_set1_epi16(2); let b = _mm512_set1_epi16(8); let c = _mm512_set1_epi16(1); @@ -3125,7 +3306,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_shrdv_epi16() { + const unsafe fn test_mm256_shrdv_epi16() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(8); let c = _mm256_set1_epi16(1); @@ -3135,7 +3316,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_mask_shrdv_epi16() { + const unsafe fn test_mm256_mask_shrdv_epi16() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(8); let c = _mm256_set1_epi16(1); @@ -3147,7 +3328,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_maskz_shrdv_epi16() { + const unsafe fn test_mm256_maskz_shrdv_epi16() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(8); let c = _mm256_set1_epi16(1); @@ -3159,7 +3340,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_shrdv_epi16() { + const unsafe fn test_mm_shrdv_epi16() { let a = _mm_set1_epi16(2); let b = _mm_set1_epi16(8); let c = _mm_set1_epi16(1); @@ -3169,7 +3350,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_mask_shrdv_epi16() { + const unsafe fn test_mm_mask_shrdv_epi16() { let a = _mm_set1_epi16(2); let b = _mm_set1_epi16(8); let c = 
_mm_set1_epi16(1); @@ -3181,7 +3362,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_maskz_shrdv_epi16() { + const unsafe fn test_mm_maskz_shrdv_epi16() { let a = _mm_set1_epi16(2); let b = _mm_set1_epi16(8); let c = _mm_set1_epi16(1); @@ -3193,7 +3374,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_shldi_epi64() { + const unsafe fn test_mm512_shldi_epi64() { let a = _mm512_set1_epi64(1); let b = _mm512_set1_epi64(1 << 63); let r = _mm512_shldi_epi64::<2>(a, b); @@ -3202,7 +3383,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_mask_shldi_epi64() { + const unsafe fn test_mm512_mask_shldi_epi64() { let a = _mm512_set1_epi64(1); let b = _mm512_set1_epi64(1 << 63); let r = _mm512_mask_shldi_epi64::<2>(a, 0, a, b); @@ -3213,7 +3394,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_maskz_shldi_epi64() { + const unsafe fn test_mm512_maskz_shldi_epi64() { let a = _mm512_set1_epi64(1); let b = _mm512_set1_epi64(1 << 63); let r = _mm512_maskz_shldi_epi64::<2>(0, a, b); @@ -3224,7 +3405,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_shldi_epi64() { + const unsafe fn test_mm256_shldi_epi64() { let a = _mm256_set1_epi64x(1); let b = _mm256_set1_epi64x(1 << 63); let r = _mm256_shldi_epi64::<2>(a, b); @@ -3233,7 +3414,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_mask_shldi_epi64() { + const unsafe fn test_mm256_mask_shldi_epi64() { let a = _mm256_set1_epi64x(1); let b = _mm256_set1_epi64x(1 << 63); let r = _mm256_mask_shldi_epi64::<2>(a, 0, a, b); @@ -3244,7 +3425,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_maskz_shldi_epi64() { + const unsafe fn test_mm256_maskz_shldi_epi64() { let a = _mm256_set1_epi64x(1); let b = _mm256_set1_epi64x(1 << 63); let r = _mm256_maskz_shldi_epi64::<2>(0, a, b); @@ -3255,7 +3436,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_shldi_epi64() { + const unsafe fn test_mm_shldi_epi64() { let a = _mm_set1_epi64x(1); let b = _mm_set1_epi64x(1 << 63); let r = _mm_shldi_epi64::<2>(a, b); @@ -3264,7 +3445,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_mask_shldi_epi64() { + const unsafe fn test_mm_mask_shldi_epi64() { let a = _mm_set1_epi64x(1); let b = _mm_set1_epi64x(1 << 63); let r = _mm_mask_shldi_epi64::<2>(a, 0, a, b); @@ -3275,7 +3456,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_maskz_shldi_epi64() { + const unsafe fn test_mm_maskz_shldi_epi64() { let a = _mm_set1_epi64x(1); let b = _mm_set1_epi64x(1 << 63); let r = _mm_maskz_shldi_epi64::<2>(0, a, b); @@ -3286,7 +3467,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_shldi_epi32() { + const unsafe fn test_mm512_shldi_epi32() { let a = _mm512_set1_epi32(1); let b = _mm512_set1_epi32(1 << 31); let r = _mm512_shldi_epi32::<2>(a, b); @@ -3295,7 +3476,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_mask_shldi_epi32() { + const unsafe fn test_mm512_mask_shldi_epi32() { let a = _mm512_set1_epi32(1); let b = _mm512_set1_epi32(1 << 31); let r = _mm512_mask_shldi_epi32::<2>(a, 0, a, b); @@ -3306,7 +3487,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_maskz_shldi_epi32() { + const unsafe fn test_mm512_maskz_shldi_epi32() { let a = _mm512_set1_epi32(1); let b = _mm512_set1_epi32(1 << 
31); let r = _mm512_maskz_shldi_epi32::<2>(0, a, b); @@ -3317,7 +3498,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_shldi_epi32() { + const unsafe fn test_mm256_shldi_epi32() { let a = _mm256_set1_epi32(1); let b = _mm256_set1_epi32(1 << 31); let r = _mm256_shldi_epi32::<2>(a, b); @@ -3326,7 +3507,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_mask_shldi_epi32() { + const unsafe fn test_mm256_mask_shldi_epi32() { let a = _mm256_set1_epi32(1); let b = _mm256_set1_epi32(1 << 31); let r = _mm256_mask_shldi_epi32::<2>(a, 0, a, b); @@ -3337,7 +3518,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_maskz_shldi_epi32() { + const unsafe fn test_mm256_maskz_shldi_epi32() { let a = _mm256_set1_epi32(1); let b = _mm256_set1_epi32(1 << 31); let r = _mm256_maskz_shldi_epi32::<2>(0, a, b); @@ -3348,7 +3529,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_shldi_epi32() { + const unsafe fn test_mm_shldi_epi32() { let a = _mm_set1_epi32(1); let b = _mm_set1_epi32(1 << 31); let r = _mm_shldi_epi32::<2>(a, b); @@ -3357,7 +3538,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_mask_shldi_epi32() { + const unsafe fn test_mm_mask_shldi_epi32() { let a = _mm_set1_epi32(1); let b = _mm_set1_epi32(1 << 31); let r = _mm_mask_shldi_epi32::<2>(a, 0, a, b); @@ -3368,7 +3549,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_maskz_shldi_epi32() { + const unsafe fn test_mm_maskz_shldi_epi32() { let a = _mm_set1_epi32(1); let b = _mm_set1_epi32(1 << 31); let r = _mm_maskz_shldi_epi32::<2>(0, a, b); @@ -3379,7 +3560,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_shldi_epi16() { + const unsafe fn test_mm512_shldi_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1 << 15); let r = _mm512_shldi_epi16::<2>(a, b); @@ -3388,7 +3569,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_mask_shldi_epi16() { + const unsafe fn test_mm512_mask_shldi_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1 << 15); let r = _mm512_mask_shldi_epi16::<2>(a, 0, a, b); @@ -3399,7 +3580,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_maskz_shldi_epi16() { + const unsafe fn test_mm512_maskz_shldi_epi16() { let a = _mm512_set1_epi16(1); let b = _mm512_set1_epi16(1 << 15); let r = _mm512_maskz_shldi_epi16::<2>(0, a, b); @@ -3410,7 +3591,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_shldi_epi16() { + const unsafe fn test_mm256_shldi_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1 << 15); let r = _mm256_shldi_epi16::<2>(a, b); @@ -3419,7 +3600,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_mask_shldi_epi16() { + const unsafe fn test_mm256_mask_shldi_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1 << 15); let r = _mm256_mask_shldi_epi16::<2>(a, 0, a, b); @@ -3430,7 +3611,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_maskz_shldi_epi16() { + const unsafe fn test_mm256_maskz_shldi_epi16() { let a = _mm256_set1_epi16(1); let b = _mm256_set1_epi16(1 << 15); let r = _mm256_maskz_shldi_epi16::<2>(0, a, b); @@ -3441,7 +3622,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_shldi_epi16() { + const unsafe fn 
test_mm_shldi_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1 << 15); let r = _mm_shldi_epi16::<2>(a, b); @@ -3450,7 +3631,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_mask_shldi_epi16() { + const unsafe fn test_mm_mask_shldi_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1 << 15); let r = _mm_mask_shldi_epi16::<2>(a, 0, a, b); @@ -3461,7 +3642,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_maskz_shldi_epi16() { + const unsafe fn test_mm_maskz_shldi_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(1 << 15); let r = _mm_maskz_shldi_epi16::<2>(0, a, b); @@ -3472,7 +3653,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_shrdi_epi64() { + const unsafe fn test_mm512_shrdi_epi64() { let a = _mm512_set1_epi64(2); let b = _mm512_set1_epi64(8); let r = _mm512_shrdi_epi64::<1>(a, b); @@ -3481,7 +3662,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_mask_shrdi_epi64() { + const unsafe fn test_mm512_mask_shrdi_epi64() { let a = _mm512_set1_epi64(2); let b = _mm512_set1_epi64(8); let r = _mm512_mask_shrdi_epi64::<1>(a, 0, a, b); @@ -3492,7 +3673,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_maskz_shrdi_epi64() { + const unsafe fn test_mm512_maskz_shrdi_epi64() { let a = _mm512_set1_epi64(2); let b = _mm512_set1_epi64(8); let r = _mm512_maskz_shrdi_epi64::<1>(0, a, b); @@ -3503,7 +3684,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_shrdi_epi64() { + const unsafe fn test_mm256_shrdi_epi64() { let a = _mm256_set1_epi64x(2); let b = _mm256_set1_epi64x(8); let r = _mm256_shrdi_epi64::<1>(a, b); @@ -3512,7 +3693,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_mask_shrdi_epi64() { + const unsafe fn test_mm256_mask_shrdi_epi64() { let a = _mm256_set1_epi64x(2); let b = _mm256_set1_epi64x(8); let r = _mm256_mask_shrdi_epi64::<1>(a, 0, a, b); @@ -3523,7 +3704,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_maskz_shrdi_epi64() { + const unsafe fn test_mm256_maskz_shrdi_epi64() { let a = _mm256_set1_epi64x(2); let b = _mm256_set1_epi64x(8); let r = _mm256_maskz_shrdi_epi64::<1>(0, a, b); @@ -3534,7 +3715,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_shrdi_epi64() { + const unsafe fn test_mm_shrdi_epi64() { let a = _mm_set1_epi64x(2); let b = _mm_set1_epi64x(8); let r = _mm_shrdi_epi64::<1>(a, b); @@ -3543,7 +3724,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_mask_shrdi_epi64() { + const unsafe fn test_mm_mask_shrdi_epi64() { let a = _mm_set1_epi64x(2); let b = _mm_set1_epi64x(8); let r = _mm_mask_shrdi_epi64::<1>(a, 0, a, b); @@ -3554,7 +3735,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_maskz_shrdi_epi64() { + const unsafe fn test_mm_maskz_shrdi_epi64() { let a = _mm_set1_epi64x(2); let b = _mm_set1_epi64x(8); let r = _mm_maskz_shrdi_epi64::<1>(0, a, b); @@ -3565,7 +3746,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_shrdi_epi32() { + const unsafe fn test_mm512_shrdi_epi32() { let a = _mm512_set1_epi32(2); let b = _mm512_set1_epi32(8); let r = _mm512_shrdi_epi32::<1>(a, b); @@ -3574,7 +3755,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_mask_shrdi_epi32() { + const unsafe fn test_mm512_mask_shrdi_epi32() { let a 
= _mm512_set1_epi32(2); let b = _mm512_set1_epi32(8); let r = _mm512_mask_shrdi_epi32::<1>(a, 0, a, b); @@ -3585,7 +3766,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_maskz_shrdi_epi32() { + const unsafe fn test_mm512_maskz_shrdi_epi32() { let a = _mm512_set1_epi32(2); let b = _mm512_set1_epi32(8); let r = _mm512_maskz_shrdi_epi32::<1>(0, a, b); @@ -3596,7 +3777,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_shrdi_epi32() { + const unsafe fn test_mm256_shrdi_epi32() { let a = _mm256_set1_epi32(2); let b = _mm256_set1_epi32(8); let r = _mm256_shrdi_epi32::<1>(a, b); @@ -3605,7 +3786,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_mask_shrdi_epi32() { + const unsafe fn test_mm256_mask_shrdi_epi32() { let a = _mm256_set1_epi32(2); let b = _mm256_set1_epi32(8); let r = _mm256_mask_shrdi_epi32::<1>(a, 0, a, b); @@ -3616,7 +3797,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_maskz_shrdi_epi32() { + const unsafe fn test_mm256_maskz_shrdi_epi32() { let a = _mm256_set1_epi32(2); let b = _mm256_set1_epi32(8); let r = _mm256_maskz_shrdi_epi32::<1>(0, a, b); @@ -3627,7 +3808,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_shrdi_epi32() { + const unsafe fn test_mm_shrdi_epi32() { let a = _mm_set1_epi32(2); let b = _mm_set1_epi32(8); let r = _mm_shrdi_epi32::<1>(a, b); @@ -3636,7 +3817,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_mask_shrdi_epi32() { + const unsafe fn test_mm_mask_shrdi_epi32() { let a = _mm_set1_epi32(2); let b = _mm_set1_epi32(8); let r = _mm_mask_shrdi_epi32::<1>(a, 0, a, b); @@ -3647,7 +3828,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_maskz_shrdi_epi32() { + const unsafe fn test_mm_maskz_shrdi_epi32() { let a = _mm_set1_epi32(2); let b = _mm_set1_epi32(8); let r = _mm_maskz_shrdi_epi32::<1>(0, a, b); @@ -3658,7 +3839,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_shrdi_epi16() { + const unsafe fn test_mm512_shrdi_epi16() { let a = _mm512_set1_epi16(2); let b = _mm512_set1_epi16(8); let r = _mm512_shrdi_epi16::<1>(a, b); @@ -3667,7 +3848,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_mask_shrdi_epi16() { + const unsafe fn test_mm512_mask_shrdi_epi16() { let a = _mm512_set1_epi16(2); let b = _mm512_set1_epi16(8); let r = _mm512_mask_shrdi_epi16::<1>(a, 0, a, b); @@ -3678,7 +3859,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2")] - unsafe fn test_mm512_maskz_shrdi_epi16() { + const unsafe fn test_mm512_maskz_shrdi_epi16() { let a = _mm512_set1_epi16(2); let b = _mm512_set1_epi16(8); let r = _mm512_maskz_shrdi_epi16::<1>(0, a, b); @@ -3689,7 +3870,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_shrdi_epi16() { + const unsafe fn test_mm256_shrdi_epi16() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(8); let r = _mm256_shrdi_epi16::<1>(a, b); @@ -3698,7 +3879,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_mask_shrdi_epi16() { + const unsafe fn test_mm256_mask_shrdi_epi16() { let a = _mm256_set1_epi16(2); let b = _mm256_set1_epi16(8); let r = _mm256_mask_shrdi_epi16::<1>(a, 0, a, b); @@ -3709,7 +3890,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm256_maskz_shrdi_epi16() { + const unsafe fn test_mm256_maskz_shrdi_epi16() { let a 
= _mm256_set1_epi16(2); let b = _mm256_set1_epi16(8); let r = _mm256_maskz_shrdi_epi16::<1>(0, a, b); @@ -3720,7 +3901,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_shrdi_epi16() { + const unsafe fn test_mm_shrdi_epi16() { let a = _mm_set1_epi16(2); let b = _mm_set1_epi16(8); let r = _mm_shrdi_epi16::<1>(a, b); @@ -3729,7 +3910,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_mask_shrdi_epi16() { + const unsafe fn test_mm_mask_shrdi_epi16() { let a = _mm_set1_epi16(2); let b = _mm_set1_epi16(8); let r = _mm_mask_shrdi_epi16::<1>(a, 0, a, b); @@ -3740,7 +3921,7 @@ mod tests { } #[simd_test(enable = "avx512vbmi2,avx512vl")] - unsafe fn test_mm_maskz_shrdi_epi16() { + const unsafe fn test_mm_maskz_shrdi_epi16() { let a = _mm_set1_epi16(2); let b = _mm_set1_epi16(8); let r = _mm_maskz_shrdi_epi16::<1>(0, a, b); diff --git a/crates/core_arch/src/x86/avx512vpopcntdq.rs b/crates/core_arch/src/x86/avx512vpopcntdq.rs index e47a14b24d..42bff33a28 100644 --- a/crates/core_arch/src/x86/avx512vpopcntdq.rs +++ b/crates/core_arch/src/x86/avx512vpopcntdq.rs @@ -26,7 +26,8 @@ use stdarch_test::assert_instr; #[target_feature(enable = "avx512vpopcntdq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntd))] -pub fn _mm512_popcnt_epi32(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_popcnt_epi32(a: __m512i) -> __m512i { unsafe { transmute(simd_ctpop(a.as_i32x16())) } } @@ -40,7 +41,8 @@ pub fn _mm512_popcnt_epi32(a: __m512i) -> __m512i { #[target_feature(enable = "avx512vpopcntdq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntd))] -pub fn _mm512_maskz_popcnt_epi32(k: __mmask16, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_popcnt_epi32(k: __mmask16, a: __m512i) -> __m512i { unsafe { transmute(simd_select_bitmask( k, @@ -60,7 +62,8 @@ pub fn _mm512_maskz_popcnt_epi32(k: __mmask16, a: __m512i) -> __m512i { #[target_feature(enable = "avx512vpopcntdq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntd))] -pub fn _mm512_mask_popcnt_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_popcnt_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { unsafe { transmute(simd_select_bitmask( k, @@ -77,7 +80,8 @@ pub fn _mm512_mask_popcnt_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m51 #[target_feature(enable = "avx512vpopcntdq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntd))] -pub fn _mm256_popcnt_epi32(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_popcnt_epi32(a: __m256i) -> __m256i { unsafe { transmute(simd_ctpop(a.as_i32x8())) } } @@ -91,7 +95,8 @@ pub fn _mm256_popcnt_epi32(a: __m256i) -> __m256i { #[target_feature(enable = "avx512vpopcntdq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntd))] -pub fn _mm256_maskz_popcnt_epi32(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_popcnt_epi32(k: __mmask8, a: __m256i) -> __m256i { unsafe { 
transmute(simd_select_bitmask( k, @@ -111,7 +116,8 @@ pub fn _mm256_maskz_popcnt_epi32(k: __mmask8, a: __m256i) -> __m256i { #[target_feature(enable = "avx512vpopcntdq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntd))] -pub fn _mm256_mask_popcnt_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_popcnt_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { unsafe { transmute(simd_select_bitmask( k, @@ -128,7 +134,8 @@ pub fn _mm256_mask_popcnt_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256 #[target_feature(enable = "avx512vpopcntdq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntd))] -pub fn _mm_popcnt_epi32(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_popcnt_epi32(a: __m128i) -> __m128i { unsafe { transmute(simd_ctpop(a.as_i32x4())) } } @@ -142,7 +149,8 @@ pub fn _mm_popcnt_epi32(a: __m128i) -> __m128i { #[target_feature(enable = "avx512vpopcntdq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntd))] -pub fn _mm_maskz_popcnt_epi32(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_popcnt_epi32(k: __mmask8, a: __m128i) -> __m128i { unsafe { transmute(simd_select_bitmask( k, @@ -162,7 +170,8 @@ pub fn _mm_maskz_popcnt_epi32(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512vpopcntdq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntd))] -pub fn _mm_mask_popcnt_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_popcnt_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { transmute(simd_select_bitmask( k, @@ -179,7 +188,8 @@ pub fn _mm_mask_popcnt_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512vpopcntdq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntq))] -pub fn _mm512_popcnt_epi64(a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_popcnt_epi64(a: __m512i) -> __m512i { unsafe { transmute(simd_ctpop(a.as_i64x8())) } } @@ -193,7 +203,8 @@ pub fn _mm512_popcnt_epi64(a: __m512i) -> __m512i { #[target_feature(enable = "avx512vpopcntdq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntq))] -pub fn _mm512_maskz_popcnt_epi64(k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_maskz_popcnt_epi64(k: __mmask8, a: __m512i) -> __m512i { unsafe { transmute(simd_select_bitmask( k, @@ -213,7 +224,8 @@ pub fn _mm512_maskz_popcnt_epi64(k: __mmask8, a: __m512i) -> __m512i { #[target_feature(enable = "avx512vpopcntdq")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntq))] -pub fn _mm512_mask_popcnt_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm512_mask_popcnt_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { unsafe { 
transmute(simd_select_bitmask( k, @@ -230,7 +242,8 @@ pub fn _mm512_mask_popcnt_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512 #[target_feature(enable = "avx512vpopcntdq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntq))] -pub fn _mm256_popcnt_epi64(a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_popcnt_epi64(a: __m256i) -> __m256i { unsafe { transmute(simd_ctpop(a.as_i64x4())) } } @@ -244,7 +257,8 @@ pub fn _mm256_popcnt_epi64(a: __m256i) -> __m256i { #[target_feature(enable = "avx512vpopcntdq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntq))] -pub fn _mm256_maskz_popcnt_epi64(k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_maskz_popcnt_epi64(k: __mmask8, a: __m256i) -> __m256i { unsafe { transmute(simd_select_bitmask( k, @@ -264,7 +278,8 @@ pub fn _mm256_maskz_popcnt_epi64(k: __mmask8, a: __m256i) -> __m256i { #[target_feature(enable = "avx512vpopcntdq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntq))] -pub fn _mm256_mask_popcnt_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_mask_popcnt_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { unsafe { transmute(simd_select_bitmask( k, @@ -281,7 +296,8 @@ pub fn _mm256_mask_popcnt_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256 #[target_feature(enable = "avx512vpopcntdq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntq))] -pub fn _mm_popcnt_epi64(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_popcnt_epi64(a: __m128i) -> __m128i { unsafe { transmute(simd_ctpop(a.as_i64x2())) } } @@ -295,7 +311,8 @@ pub fn _mm_popcnt_epi64(a: __m128i) -> __m128i { #[target_feature(enable = "avx512vpopcntdq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntq))] -pub fn _mm_maskz_popcnt_epi64(k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_maskz_popcnt_epi64(k: __mmask8, a: __m128i) -> __m128i { unsafe { transmute(simd_select_bitmask( k, @@ -315,7 +332,8 @@ pub fn _mm_maskz_popcnt_epi64(k: __mmask8, a: __m128i) -> __m128i { #[target_feature(enable = "avx512vpopcntdq,avx512vl")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpopcntq))] -pub fn _mm_mask_popcnt_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mask_popcnt_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { unsafe { transmute(simd_select_bitmask( k, @@ -327,12 +345,13 @@ pub fn _mm_mask_popcnt_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; use crate::core_arch::x86::*; #[simd_test(enable = "avx512vpopcntdq,avx512f")] - unsafe fn test_mm512_popcnt_epi32() { + const unsafe fn test_mm512_popcnt_epi32() { let test_data = _mm512_set_epi32( 0, 1, @@ -358,7 +377,7 @@ mod tests { } #[simd_test(enable = 
"avx512vpopcntdq,avx512f")] - unsafe fn test_mm512_mask_popcnt_epi32() { + const unsafe fn test_mm512_mask_popcnt_epi32() { let test_data = _mm512_set_epi32( 0, 1, @@ -401,7 +420,7 @@ mod tests { } #[simd_test(enable = "avx512vpopcntdq,avx512f")] - unsafe fn test_mm512_maskz_popcnt_epi32() { + const unsafe fn test_mm512_maskz_popcnt_epi32() { let test_data = _mm512_set_epi32( 0, 1, @@ -427,7 +446,7 @@ mod tests { } #[simd_test(enable = "avx512vpopcntdq,avx512f,avx512vl")] - unsafe fn test_mm256_popcnt_epi32() { + const unsafe fn test_mm256_popcnt_epi32() { let test_data = _mm256_set_epi32(0, 1, -1, 2, 7, 0xFF_FE, 0x7F_FF_FF_FF, -100); let actual_result = _mm256_popcnt_epi32(test_data); let reference_result = _mm256_set_epi32(0, 1, 32, 1, 3, 15, 31, 28); @@ -435,7 +454,7 @@ mod tests { } #[simd_test(enable = "avx512vpopcntdq,avx512f,avx512vl")] - unsafe fn test_mm256_mask_popcnt_epi32() { + const unsafe fn test_mm256_mask_popcnt_epi32() { let test_data = _mm256_set_epi32(0, 1, -1, 2, 7, 0xFF_FE, 0x7F_FF_FF_FF, -100); let mask = 0xF0; let actual_result = _mm256_mask_popcnt_epi32(test_data, mask, test_data); @@ -444,7 +463,7 @@ mod tests { } #[simd_test(enable = "avx512vpopcntdq,avx512f,avx512vl")] - unsafe fn test_mm256_maskz_popcnt_epi32() { + const unsafe fn test_mm256_maskz_popcnt_epi32() { let test_data = _mm256_set_epi32(0, 1, -1, 2, 7, 0xFF_FE, 0x7F_FF_FF_FF, -100); let mask = 0xF0; let actual_result = _mm256_maskz_popcnt_epi32(mask, test_data); @@ -453,7 +472,7 @@ mod tests { } #[simd_test(enable = "avx512vpopcntdq,avx512f,avx512vl")] - unsafe fn test_mm_popcnt_epi32() { + const unsafe fn test_mm_popcnt_epi32() { let test_data = _mm_set_epi32(0, 1, -1, -100); let actual_result = _mm_popcnt_epi32(test_data); let reference_result = _mm_set_epi32(0, 1, 32, 28); @@ -461,7 +480,7 @@ mod tests { } #[simd_test(enable = "avx512vpopcntdq,avx512f,avx512vl")] - unsafe fn test_mm_mask_popcnt_epi32() { + const unsafe fn test_mm_mask_popcnt_epi32() { let test_data = _mm_set_epi32(0, 1, -1, -100); let mask = 0xE; let actual_result = _mm_mask_popcnt_epi32(test_data, mask, test_data); @@ -470,7 +489,7 @@ mod tests { } #[simd_test(enable = "avx512vpopcntdq,avx512f,avx512vl")] - unsafe fn test_mm_maskz_popcnt_epi32() { + const unsafe fn test_mm_maskz_popcnt_epi32() { let test_data = _mm_set_epi32(0, 1, -1, -100); let mask = 0xE; let actual_result = _mm_maskz_popcnt_epi32(mask, test_data); @@ -479,7 +498,7 @@ mod tests { } #[simd_test(enable = "avx512vpopcntdq,avx512f")] - unsafe fn test_mm512_popcnt_epi64() { + const unsafe fn test_mm512_popcnt_epi64() { let test_data = _mm512_set_epi64(0, 1, -1, 2, 7, 0xFF_FE, 0x7F_FF_FF_FF_FF_FF_FF_FF, -100); let actual_result = _mm512_popcnt_epi64(test_data); let reference_result = _mm512_set_epi64(0, 1, 64, 1, 3, 15, 63, 60); @@ -487,7 +506,7 @@ mod tests { } #[simd_test(enable = "avx512vpopcntdq,avx512f")] - unsafe fn test_mm512_mask_popcnt_epi64() { + const unsafe fn test_mm512_mask_popcnt_epi64() { let test_data = _mm512_set_epi64(0, 1, -1, 2, 7, 0xFF_FE, 0x7F_FF_FF_FF_FF_FF_FF_FF, -100); let mask = 0xF0; let actual_result = _mm512_mask_popcnt_epi64(test_data, mask, test_data); @@ -497,7 +516,7 @@ mod tests { } #[simd_test(enable = "avx512vpopcntdq,avx512f")] - unsafe fn test_mm512_maskz_popcnt_epi64() { + const unsafe fn test_mm512_maskz_popcnt_epi64() { let test_data = _mm512_set_epi64(0, 1, -1, 2, 7, 0xFF_FE, 0x7F_FF_FF_FF_FF_FF_FF_FF, -100); let mask = 0xF0; let actual_result = _mm512_maskz_popcnt_epi64(mask, test_data); @@ -506,7 +525,7 @@ mod tests { } 
#[simd_test(enable = "avx512vpopcntdq,avx512vl")] - unsafe fn test_mm256_popcnt_epi64() { + const unsafe fn test_mm256_popcnt_epi64() { let test_data = _mm256_set_epi64x(0, 1, -1, -100); let actual_result = _mm256_popcnt_epi64(test_data); let reference_result = _mm256_set_epi64x(0, 1, 64, 60); @@ -514,7 +533,7 @@ mod tests { } #[simd_test(enable = "avx512vpopcntdq,avx512vl")] - unsafe fn test_mm256_mask_popcnt_epi64() { + const unsafe fn test_mm256_mask_popcnt_epi64() { let test_data = _mm256_set_epi64x(0, 1, -1, -100); let mask = 0xE; let actual_result = _mm256_mask_popcnt_epi64(test_data, mask, test_data); @@ -523,7 +542,7 @@ mod tests { } #[simd_test(enable = "avx512vpopcntdq,avx512vl")] - unsafe fn test_mm256_maskz_popcnt_epi64() { + const unsafe fn test_mm256_maskz_popcnt_epi64() { let test_data = _mm256_set_epi64x(0, 1, -1, -100); let mask = 0xE; let actual_result = _mm256_maskz_popcnt_epi64(mask, test_data); @@ -532,7 +551,7 @@ mod tests { } #[simd_test(enable = "avx512vpopcntdq,avx512vl")] - unsafe fn test_mm_popcnt_epi64() { + const unsafe fn test_mm_popcnt_epi64() { let test_data = _mm_set_epi64x(0, 1); let actual_result = _mm_popcnt_epi64(test_data); let reference_result = _mm_set_epi64x(0, 1); @@ -544,7 +563,7 @@ mod tests { } #[simd_test(enable = "avx512vpopcntdq,avx512vl")] - unsafe fn test_mm_mask_popcnt_epi64() { + const unsafe fn test_mm_mask_popcnt_epi64() { let test_data = _mm_set_epi64x(0, -100); let mask = 0x2; let actual_result = _mm_mask_popcnt_epi64(test_data, mask, test_data); @@ -558,7 +577,7 @@ mod tests { } #[simd_test(enable = "avx512vpopcntdq,avx512vl")] - unsafe fn test_mm_maskz_popcnt_epi64() { + const unsafe fn test_mm_maskz_popcnt_epi64() { let test_data = _mm_set_epi64x(0, 1); let mask = 0x2; let actual_result = _mm_maskz_popcnt_epi64(mask, test_data); diff --git a/crates/core_arch/src/x86/bmi1.rs b/crates/core_arch/src/x86/bmi1.rs index eb7242944a..08444aaaed 100644 --- a/crates/core_arch/src/x86/bmi1.rs +++ b/crates/core_arch/src/x86/bmi1.rs @@ -46,7 +46,8 @@ pub fn _bextr2_u32(a: u32, control: u32) -> u32 { #[target_feature(enable = "bmi1")] #[cfg_attr(test, assert_instr(andn))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _andn_u32(a: u32, b: u32) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _andn_u32(a: u32, b: u32) -> u32 { !a & b } @@ -57,7 +58,8 @@ pub fn _andn_u32(a: u32, b: u32) -> u32 { #[target_feature(enable = "bmi1")] #[cfg_attr(test, assert_instr(blsi))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blsi_u32(x: u32) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blsi_u32(x: u32) -> u32 { x & x.wrapping_neg() } @@ -68,7 +70,8 @@ pub fn _blsi_u32(x: u32) -> u32 { #[target_feature(enable = "bmi1")] #[cfg_attr(test, assert_instr(blsmsk))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blsmsk_u32(x: u32) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blsmsk_u32(x: u32) -> u32 { x ^ (x.wrapping_sub(1_u32)) } @@ -81,7 +84,8 @@ pub fn _blsmsk_u32(x: u32) -> u32 { #[target_feature(enable = "bmi1")] #[cfg_attr(test, assert_instr(blsr))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blsr_u32(x: u32) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blsr_u32(x: u32) -> u32 { x & (x.wrapping_sub(1)) } @@ -94,7 +98,8 @@ pub fn _blsr_u32(x: u32) -> u32 { #[target_feature(enable = "bmi1")] #[cfg_attr(test, 
assert_instr(tzcnt))] #[stable(feature = "simd_x86_updates", since = "1.82.0")] -pub fn _tzcnt_u16(x: u16) -> u16 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _tzcnt_u16(x: u16) -> u16 { x.trailing_zeros() as u16 } @@ -107,7 +112,8 @@ pub fn _tzcnt_u16(x: u16) -> u16 { #[target_feature(enable = "bmi1")] #[cfg_attr(test, assert_instr(tzcnt))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _tzcnt_u32(x: u32) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _tzcnt_u32(x: u32) -> u32 { x.trailing_zeros() } @@ -120,7 +126,8 @@ pub fn _tzcnt_u32(x: u32) -> u32 { #[target_feature(enable = "bmi1")] #[cfg_attr(test, assert_instr(tzcnt))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_tzcnt_32(x: u32) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_tzcnt_32(x: u32) -> i32 { x.trailing_zeros() as i32 } @@ -131,6 +138,7 @@ unsafe extern "C" { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; use crate::core_arch::x86::*; @@ -142,7 +150,7 @@ mod tests { } #[simd_test(enable = "bmi1")] - unsafe fn test_andn_u32() { + const unsafe fn test_andn_u32() { assert_eq!(_andn_u32(0, 0), 0); assert_eq!(_andn_u32(0, 1), 1); assert_eq!(_andn_u32(1, 0), 0); @@ -165,32 +173,32 @@ mod tests { } #[simd_test(enable = "bmi1")] - unsafe fn test_blsi_u32() { + const unsafe fn test_blsi_u32() { assert_eq!(_blsi_u32(0b1101_0000u32), 0b0001_0000u32); } #[simd_test(enable = "bmi1")] - unsafe fn test_blsmsk_u32() { + const unsafe fn test_blsmsk_u32() { let r = _blsmsk_u32(0b0011_0000u32); assert_eq!(r, 0b0001_1111u32); } #[simd_test(enable = "bmi1")] - unsafe fn test_blsr_u32() { + const unsafe fn test_blsr_u32() { // TODO: test the behavior when the input is `0`. 
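// For reference (illustrative note, not from the diff): the zero-input case the
// TODO above asks about is well defined here: `_blsr_u32(0)` is
// `0 & 0u32.wrapping_sub(1)`, i.e. `0 & u32::MAX == 0`, matching BLSR's
// behavior of returning 0 when no bit is set.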
let r = _blsr_u32(0b0011_0000u32); assert_eq!(r, 0b0010_0000u32); } #[simd_test(enable = "bmi1")] - unsafe fn test_tzcnt_u16() { + const unsafe fn test_tzcnt_u16() { assert_eq!(_tzcnt_u16(0b0000_0001u16), 0u16); assert_eq!(_tzcnt_u16(0b0000_0000u16), 16u16); assert_eq!(_tzcnt_u16(0b1001_0000u16), 4u16); } #[simd_test(enable = "bmi1")] - unsafe fn test_tzcnt_u32() { + const unsafe fn test_tzcnt_u32() { assert_eq!(_tzcnt_u32(0b0000_0001u32), 0u32); assert_eq!(_tzcnt_u32(0b0000_0000u32), 32u32); assert_eq!(_tzcnt_u32(0b1001_0000u32), 4u32); diff --git a/crates/core_arch/src/x86/bmi2.rs b/crates/core_arch/src/x86/bmi2.rs index 83cf650923..b5b7d0f340 100644 --- a/crates/core_arch/src/x86/bmi2.rs +++ b/crates/core_arch/src/x86/bmi2.rs @@ -25,7 +25,8 @@ use stdarch_test::assert_instr; #[cfg_attr(all(test, target_arch = "x86"), assert_instr(mul))] #[target_feature(enable = "bmi2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mulx_u32(a: u32, b: u32, hi: &mut u32) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mulx_u32(a: u32, b: u32, hi: &mut u32) -> u32 { let result: u64 = (a as u64) * (b as u64); *hi = (result >> 32) as u32; result as u32 @@ -77,6 +78,7 @@ unsafe extern "C" { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; use crate::core_arch::x86::*; @@ -117,7 +119,7 @@ mod tests { } #[simd_test(enable = "bmi2")] - unsafe fn test_mulx_u32() { + const unsafe fn test_mulx_u32() { let a: u32 = 4_294_967_200; let b: u32 = 2; let mut hi = 0; diff --git a/crates/core_arch/src/x86/bswap.rs b/crates/core_arch/src/x86/bswap.rs index ea07a7d622..f8e177f7c2 100644 --- a/crates/core_arch/src/x86/bswap.rs +++ b/crates/core_arch/src/x86/bswap.rs @@ -10,16 +10,20 @@ use stdarch_test::assert_instr; #[inline] #[cfg_attr(test, assert_instr(bswap))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _bswap(x: i32) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _bswap(x: i32) -> i32 { x.swap_bytes() } #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; + use stdarch_test::simd_test; + use super::*; - #[test] - fn test_bswap() { + #[simd_test] + const fn test_bswap() { assert_eq!(_bswap(0x0EADBE0F), 0x0FBEAD0E); assert_eq!(_bswap(0x00000000), 0x00000000); } diff --git a/crates/core_arch/src/x86/f16c.rs b/crates/core_arch/src/x86/f16c.rs index 519cc38294..09971939ba 100644 --- a/crates/core_arch/src/x86/f16c.rs +++ b/crates/core_arch/src/x86/f16c.rs @@ -25,7 +25,8 @@ unsafe extern "unadjusted" { #[target_feature(enable = "f16c")] #[cfg_attr(test, assert_instr("vcvtph2ps"))] #[stable(feature = "x86_f16c_intrinsics", since = "1.68.0")] -pub fn _mm_cvtph_ps(a: __m128i) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtph_ps(a: __m128i) -> __m128 { unsafe { let a: f16x8 = transmute(a); let a: f16x4 = simd_shuffle!(a, a, [0, 1, 2, 3]); @@ -41,7 +42,8 @@ pub fn _mm_cvtph_ps(a: __m128i) -> __m128 { #[target_feature(enable = "f16c")] #[cfg_attr(test, assert_instr("vcvtph2ps"))] #[stable(feature = "x86_f16c_intrinsics", since = "1.68.0")] -pub fn _mm256_cvtph_ps(a: __m128i) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_cvtph_ps(a: __m128i) -> __m256 { unsafe { let a: f16x8 = transmute(a); simd_cast(a) @@ -103,6 +105,7 @@ pub fn _mm256_cvtps_ph(a: __m256) -> __m128i { #[cfg(test)] mod tests { + use 
crate::core_arch::assert_eq_const as assert_eq; use crate::{core_arch::x86::*, mem::transmute}; use stdarch_test::simd_test; @@ -116,7 +119,7 @@ mod tests { const F16_EIGHT: i16 = 0x4800; #[simd_test(enable = "f16c")] - unsafe fn test_mm_cvtph_ps() { + const unsafe fn test_mm_cvtph_ps() { let a = _mm_set_epi16(0, 0, 0, 0, F16_ONE, F16_TWO, F16_THREE, F16_FOUR); let r = _mm_cvtph_ps(a); let e = _mm_set_ps(1.0, 2.0, 3.0, 4.0); @@ -124,7 +127,7 @@ mod tests { } #[simd_test(enable = "f16c")] - unsafe fn test_mm256_cvtph_ps() { + const unsafe fn test_mm256_cvtph_ps() { let a = _mm_set_epi16( F16_ONE, F16_TWO, F16_THREE, F16_FOUR, F16_FIVE, F16_SIX, F16_SEVEN, F16_EIGHT, ); diff --git a/crates/core_arch/src/x86/fma.rs b/crates/core_arch/src/x86/fma.rs index d3988422b9..abf79f4904 100644 --- a/crates/core_arch/src/x86/fma.rs +++ b/crates/core_arch/src/x86/fma.rs @@ -33,7 +33,8 @@ use stdarch_test::assert_instr; #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmadd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fmadd_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fmadd_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_fma(a, b, c) } } @@ -45,7 +46,8 @@ pub fn _mm_fmadd_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmadd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_fmadd_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_fmadd_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { unsafe { simd_fma(a, b, c) } } @@ -57,7 +59,8 @@ pub fn _mm256_fmadd_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmadd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fmadd_ps(a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fmadd_ps(a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { simd_fma(a, b, c) } } @@ -69,7 +72,8 @@ pub fn _mm_fmadd_ps(a: __m128, b: __m128, c: __m128) -> __m128 { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmadd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_fmadd_ps(a: __m256, b: __m256, c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_fmadd_ps(a: __m256, b: __m256, c: __m256) -> __m256 { unsafe { simd_fma(a, b, c) } } @@ -83,7 +87,8 @@ pub fn _mm256_fmadd_ps(a: __m256, b: __m256, c: __m256) -> __m256 { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmadd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fmadd_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fmadd_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_insert!( a, @@ -103,7 +108,8 @@ pub fn _mm_fmadd_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmadd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fmadd_ss(a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fmadd_ss(a: __m128, b: __m128, c: __m128) -> __m128 { 
unsafe { simd_insert!( a, @@ -122,7 +128,8 @@ pub fn _mm_fmadd_ss(a: __m128, b: __m128, c: __m128) -> __m128 { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmaddsub))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fmaddsub_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fmaddsub_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { let add = simd_fma(a, b, c); let sub = simd_fma(a, b, simd_neg(c)); @@ -139,7 +146,8 @@ pub fn _mm_fmaddsub_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmaddsub))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_fmaddsub_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_fmaddsub_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { unsafe { let add = simd_fma(a, b, c); let sub = simd_fma(a, b, simd_neg(c)); @@ -156,7 +164,8 @@ pub fn _mm256_fmaddsub_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmaddsub))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fmaddsub_ps(a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fmaddsub_ps(a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { let add = simd_fma(a, b, c); let sub = simd_fma(a, b, simd_neg(c)); @@ -173,7 +182,8 @@ pub fn _mm_fmaddsub_ps(a: __m128, b: __m128, c: __m128) -> __m128 { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmaddsub))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_fmaddsub_ps(a: __m256, b: __m256, c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_fmaddsub_ps(a: __m256, b: __m256, c: __m256) -> __m256 { unsafe { let add = simd_fma(a, b, c); let sub = simd_fma(a, b, simd_neg(c)); @@ -189,7 +199,8 @@ pub fn _mm256_fmaddsub_ps(a: __m256, b: __m256, c: __m256) -> __m256 { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmsub))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fmsub_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fmsub_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_fma(a, b, simd_neg(c)) } } @@ -201,7 +212,8 @@ pub fn _mm_fmsub_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmsub))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_fmsub_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_fmsub_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { unsafe { simd_fma(a, b, simd_neg(c)) } } @@ -213,7 +225,8 @@ pub fn _mm256_fmsub_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmsub213ps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fmsub_ps(a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fmsub_ps(a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { simd_fma(a, b, simd_neg(c)) } } @@ -225,7 +238,8 @@ 
pub fn _mm_fmsub_ps(a: __m128, b: __m128, c: __m128) -> __m128 { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmsub213ps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_fmsub_ps(a: __m256, b: __m256, c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_fmsub_ps(a: __m256, b: __m256, c: __m256) -> __m256 { unsafe { simd_fma(a, b, simd_neg(c)) } } @@ -239,7 +253,8 @@ pub fn _mm256_fmsub_ps(a: __m256, b: __m256, c: __m256) -> __m256 { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmsub))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fmsub_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fmsub_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_insert!( a, @@ -259,7 +274,8 @@ pub fn _mm_fmsub_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmsub))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fmsub_ss(a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fmsub_ss(a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { simd_insert!( a, @@ -278,7 +294,8 @@ pub fn _mm_fmsub_ss(a: __m128, b: __m128, c: __m128) -> __m128 { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmsubadd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fmsubadd_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fmsubadd_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { let add = simd_fma(a, b, c); let sub = simd_fma(a, b, simd_neg(c)); @@ -295,7 +312,8 @@ pub fn _mm_fmsubadd_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmsubadd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_fmsubadd_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_fmsubadd_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { unsafe { let add = simd_fma(a, b, c); let sub = simd_fma(a, b, simd_neg(c)); @@ -312,7 +330,8 @@ pub fn _mm256_fmsubadd_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmsubadd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fmsubadd_ps(a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fmsubadd_ps(a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { let add = simd_fma(a, b, c); let sub = simd_fma(a, b, simd_neg(c)); @@ -329,7 +348,8 @@ pub fn _mm_fmsubadd_ps(a: __m128, b: __m128, c: __m128) -> __m128 { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfmsubadd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_fmsubadd_ps(a: __m256, b: __m256, c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_fmsubadd_ps(a: __m256, b: __m256, c: __m256) -> __m256 { unsafe { let add = simd_fma(a, b, c); let sub = simd_fma(a, b, simd_neg(c)); @@ -345,7 +365,8 @@ pub fn _mm256_fmsubadd_ps(a: __m256, b: __m256, c: __m256) -> __m256 { #[target_feature(enable = 
"fma")] #[cfg_attr(test, assert_instr(vfnmadd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fnmadd_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fnmadd_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_fma(simd_neg(a), b, c) } } @@ -357,7 +378,8 @@ pub fn _mm_fnmadd_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfnmadd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_fnmadd_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_fnmadd_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { unsafe { simd_fma(simd_neg(a), b, c) } } @@ -369,7 +391,8 @@ pub fn _mm256_fnmadd_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfnmadd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fnmadd_ps(a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fnmadd_ps(a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { simd_fma(simd_neg(a), b, c) } } @@ -381,7 +404,8 @@ pub fn _mm_fnmadd_ps(a: __m128, b: __m128, c: __m128) -> __m128 { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfnmadd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_fnmadd_ps(a: __m256, b: __m256, c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_fnmadd_ps(a: __m256, b: __m256, c: __m256) -> __m256 { unsafe { simd_fma(simd_neg(a), b, c) } } @@ -395,7 +419,8 @@ pub fn _mm256_fnmadd_ps(a: __m256, b: __m256, c: __m256) -> __m256 { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfnmadd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fnmadd_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fnmadd_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_insert!( a, @@ -415,7 +440,8 @@ pub fn _mm_fnmadd_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfnmadd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fnmadd_ss(a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fnmadd_ss(a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { simd_insert!( a, @@ -434,7 +460,8 @@ pub fn _mm_fnmadd_ss(a: __m128, b: __m128, c: __m128) -> __m128 { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfnmsub))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fnmsub_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fnmsub_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_fma(simd_neg(a), b, simd_neg(c)) } } @@ -447,7 +474,8 @@ pub fn _mm_fnmsub_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfnmsub))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_fnmsub_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] 
+pub const fn _mm256_fnmsub_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { unsafe { simd_fma(simd_neg(a), b, simd_neg(c)) } } @@ -460,7 +488,8 @@ pub fn _mm256_fnmsub_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfnmsub))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fnmsub_ps(a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fnmsub_ps(a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { simd_fma(simd_neg(a), b, simd_neg(c)) } } @@ -473,7 +502,8 @@ pub fn _mm_fnmsub_ps(a: __m128, b: __m128, c: __m128) -> __m128 { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfnmsub))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_fnmsub_ps(a: __m256, b: __m256, c: __m256) -> __m256 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_fnmsub_ps(a: __m256, b: __m256, c: __m256) -> __m256 { unsafe { simd_fma(simd_neg(a), b, simd_neg(c)) } } @@ -488,7 +518,8 @@ pub fn _mm256_fnmsub_ps(a: __m256, b: __m256, c: __m256) -> __m256 { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfnmsub))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fnmsub_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fnmsub_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { unsafe { simd_insert!( a, @@ -509,7 +540,8 @@ pub fn _mm_fnmsub_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d { #[target_feature(enable = "fma")] #[cfg_attr(test, assert_instr(vfnmsub))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_fnmsub_ss(a: __m128, b: __m128, c: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_fnmsub_ss(a: __m128, b: __m128, c: __m128) -> __m128 { unsafe { simd_insert!( a, @@ -521,13 +553,14 @@ pub fn _mm_fnmsub_ss(a: __m128, b: __m128, c: __m128) -> __m128 { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; use crate::core_arch::x86::*; #[simd_test(enable = "fma")] - unsafe fn test_mm_fmadd_pd() { + const unsafe fn test_mm_fmadd_pd() { let a = _mm_setr_pd(1., 2.); let b = _mm_setr_pd(5., 3.); let c = _mm_setr_pd(4., 9.); @@ -536,7 +569,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm256_fmadd_pd() { + const unsafe fn test_mm256_fmadd_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 3., 7., 2.); let c = _mm256_setr_pd(4., 9., 1., 7.); @@ -545,7 +578,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm_fmadd_ps() { + const unsafe fn test_mm_fmadd_ps() { let a = _mm_setr_ps(1., 2., 3., 4.); let b = _mm_setr_ps(5., 3., 7., 2.); let c = _mm_setr_ps(4., 9., 1., 7.); @@ -554,7 +587,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm256_fmadd_ps() { + const unsafe fn test_mm256_fmadd_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 0., 10., -1., -2.); let b = _mm256_setr_ps(5., 3., 7., 2., 4., -6., 0., 14.); let c = _mm256_setr_ps(4., 9., 1., 7., -5., 11., -2., -3.); @@ -563,7 +596,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm_fmadd_sd() { + const unsafe fn test_mm_fmadd_sd() { let a = _mm_setr_pd(1., 2.); let b = _mm_setr_pd(5., 3.); let c = _mm_setr_pd(4., 9.); @@ -572,7 +605,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn 
test_mm_fmadd_ss() { + const unsafe fn test_mm_fmadd_ss() { let a = _mm_setr_ps(1., 2., 3., 4.); let b = _mm_setr_ps(5., 3., 7., 2.); let c = _mm_setr_ps(4., 9., 1., 7.); @@ -581,7 +614,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm_fmaddsub_pd() { + const unsafe fn test_mm_fmaddsub_pd() { let a = _mm_setr_pd(1., 2.); let b = _mm_setr_pd(5., 3.); let c = _mm_setr_pd(4., 9.); @@ -590,7 +623,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm256_fmaddsub_pd() { + const unsafe fn test_mm256_fmaddsub_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 3., 7., 2.); let c = _mm256_setr_pd(4., 9., 1., 7.); @@ -599,7 +632,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm_fmaddsub_ps() { + const unsafe fn test_mm_fmaddsub_ps() { let a = _mm_setr_ps(1., 2., 3., 4.); let b = _mm_setr_ps(5., 3., 7., 2.); let c = _mm_setr_ps(4., 9., 1., 7.); @@ -608,7 +641,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm256_fmaddsub_ps() { + const unsafe fn test_mm256_fmaddsub_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 0., 10., -1., -2.); let b = _mm256_setr_ps(5., 3., 7., 2., 4., -6., 0., 14.); let c = _mm256_setr_ps(4., 9., 1., 7., -5., 11., -2., -3.); @@ -617,7 +650,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm_fmsub_pd() { + const unsafe fn test_mm_fmsub_pd() { let a = _mm_setr_pd(1., 2.); let b = _mm_setr_pd(5., 3.); let c = _mm_setr_pd(4., 9.); @@ -626,7 +659,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm256_fmsub_pd() { + const unsafe fn test_mm256_fmsub_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 3., 7., 2.); let c = _mm256_setr_pd(4., 9., 1., 7.); @@ -635,7 +668,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm_fmsub_ps() { + const unsafe fn test_mm_fmsub_ps() { let a = _mm_setr_ps(1., 2., 3., 4.); let b = _mm_setr_ps(5., 3., 7., 2.); let c = _mm_setr_ps(4., 9., 1., 7.); @@ -644,7 +677,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm256_fmsub_ps() { + const unsafe fn test_mm256_fmsub_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 0., 10., -1., -2.); let b = _mm256_setr_ps(5., 3., 7., 2., 4., -6., 0., 14.); let c = _mm256_setr_ps(4., 9., 1., 7., -5., 11., -2., -3.); @@ -653,7 +686,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm_fmsub_sd() { + const unsafe fn test_mm_fmsub_sd() { let a = _mm_setr_pd(1., 2.); let b = _mm_setr_pd(5., 3.); let c = _mm_setr_pd(4., 9.); @@ -662,7 +695,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm_fmsub_ss() { + const unsafe fn test_mm_fmsub_ss() { let a = _mm_setr_ps(1., 2., 3., 4.); let b = _mm_setr_ps(5., 3., 7., 2.); let c = _mm_setr_ps(4., 9., 1., 7.); @@ -671,7 +704,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm_fmsubadd_pd() { + const unsafe fn test_mm_fmsubadd_pd() { let a = _mm_setr_pd(1., 2.); let b = _mm_setr_pd(5., 3.); let c = _mm_setr_pd(4., 9.); @@ -680,7 +713,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm256_fmsubadd_pd() { + const unsafe fn test_mm256_fmsubadd_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 3., 7., 2.); let c = _mm256_setr_pd(4., 9., 1., 7.); @@ -689,7 +722,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm_fmsubadd_ps() { + const unsafe fn test_mm_fmsubadd_ps() { let a = _mm_setr_ps(1., 2., 3., 4.); let b = _mm_setr_ps(5., 3., 7., 2.); let c = _mm_setr_ps(4., 9., 1., 7.); @@ -698,7 +731,7 @@ mod tests { } 
#[simd_test(enable = "fma")] - unsafe fn test_mm256_fmsubadd_ps() { + const unsafe fn test_mm256_fmsubadd_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 0., 10., -1., -2.); let b = _mm256_setr_ps(5., 3., 7., 2., 4., -6., 0., 14.); let c = _mm256_setr_ps(4., 9., 1., 7., -5., 11., -2., -3.); @@ -707,7 +740,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm_fnmadd_pd() { + const unsafe fn test_mm_fnmadd_pd() { let a = _mm_setr_pd(1., 2.); let b = _mm_setr_pd(5., 3.); let c = _mm_setr_pd(4., 9.); @@ -716,7 +749,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm256_fnmadd_pd() { + const unsafe fn test_mm256_fnmadd_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 3., 7., 2.); let c = _mm256_setr_pd(4., 9., 1., 7.); @@ -725,7 +758,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm_fnmadd_ps() { + const unsafe fn test_mm_fnmadd_ps() { let a = _mm_setr_ps(1., 2., 3., 4.); let b = _mm_setr_ps(5., 3., 7., 2.); let c = _mm_setr_ps(4., 9., 1., 7.); @@ -734,7 +767,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm256_fnmadd_ps() { + const unsafe fn test_mm256_fnmadd_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 0., 10., -1., -2.); let b = _mm256_setr_ps(5., 3., 7., 2., 4., -6., 0., 14.); let c = _mm256_setr_ps(4., 9., 1., 7., -5., 11., -2., -3.); @@ -743,7 +776,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm_fnmadd_sd() { + const unsafe fn test_mm_fnmadd_sd() { let a = _mm_setr_pd(1., 2.); let b = _mm_setr_pd(5., 3.); let c = _mm_setr_pd(4., 9.); @@ -752,7 +785,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm_fnmadd_ss() { + const unsafe fn test_mm_fnmadd_ss() { let a = _mm_setr_ps(1., 2., 3., 4.); let b = _mm_setr_ps(5., 3., 7., 2.); let c = _mm_setr_ps(4., 9., 1., 7.); @@ -761,7 +794,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm_fnmsub_pd() { + const unsafe fn test_mm_fnmsub_pd() { let a = _mm_setr_pd(1., 2.); let b = _mm_setr_pd(5., 3.); let c = _mm_setr_pd(4., 9.); @@ -770,7 +803,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm256_fnmsub_pd() { + const unsafe fn test_mm256_fnmsub_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 3., 7., 2.); let c = _mm256_setr_pd(4., 9., 1., 7.); @@ -779,7 +812,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm_fnmsub_ps() { + const unsafe fn test_mm_fnmsub_ps() { let a = _mm_setr_ps(1., 2., 3., 4.); let b = _mm_setr_ps(5., 3., 7., 2.); let c = _mm_setr_ps(4., 9., 1., 7.); @@ -788,7 +821,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm256_fnmsub_ps() { + const unsafe fn test_mm256_fnmsub_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 0., 10., -1., -2.); let b = _mm256_setr_ps(5., 3., 7., 2., 4., -6., 0., 14.); let c = _mm256_setr_ps(4., 9., 1., 7., -5., 11., -2., -3.); @@ -797,7 +830,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm_fnmsub_sd() { + const unsafe fn test_mm_fnmsub_sd() { let a = _mm_setr_pd(1., 2.); let b = _mm_setr_pd(5., 3.); let c = _mm_setr_pd(4., 9.); @@ -806,7 +839,7 @@ mod tests { } #[simd_test(enable = "fma")] - unsafe fn test_mm_fnmsub_ss() { + const unsafe fn test_mm_fnmsub_ss() { let a = _mm_setr_ps(1., 2., 3., 4.); let b = _mm_setr_ps(5., 3., 7., 2.); let c = _mm_setr_ps(4., 9., 1., 7.); diff --git a/crates/core_arch/src/x86/mod.rs b/crates/core_arch/src/x86/mod.rs index 79a593e647..7612cffd07 100644 --- a/crates/core_arch/src/x86/mod.rs +++ b/crates/core_arch/src/x86/mod.rs @@ -520,14 +520,14 
@@ macro_rules! as_transmute { ($from:ty => $as_from:ident, $($as_to:ident -> $to:ident),* $(,)?) => { impl $from {$( #[inline] - pub(crate) fn $as_to(self) -> crate::core_arch::simd::$to { + pub(crate) const fn $as_to(self) -> crate::core_arch::simd::$to { unsafe { transmute(self) } } )*} $( impl crate::core_arch::simd::$to { #[inline] - pub(crate) fn $as_from(self) -> $from { + pub(crate) const fn $as_from(self) -> $from { unsafe { transmute(self) } } } diff --git a/crates/core_arch/src/x86/sse.rs b/crates/core_arch/src/x86/sse.rs index 7dd96dd1c9..165c61104e 100644 --- a/crates/core_arch/src/x86/sse.rs +++ b/crates/core_arch/src/x86/sse.rs @@ -18,7 +18,8 @@ use stdarch_test::assert_instr; #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(addss))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_add_ss(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_add_ss(a: __m128, b: __m128) -> __m128 { unsafe { simd_insert!(a, 0, _mm_cvtss_f32(a) + _mm_cvtss_f32(b)) } } @@ -30,7 +31,8 @@ pub fn _mm_add_ss(a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(addps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_add_ps(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_add_ps(a: __m128, b: __m128) -> __m128 { unsafe { simd_add(a, b) } } @@ -42,7 +44,8 @@ pub fn _mm_add_ps(a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(subss))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_sub_ss(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_sub_ss(a: __m128, b: __m128) -> __m128 { unsafe { simd_insert!(a, 0, _mm_cvtss_f32(a) - _mm_cvtss_f32(b)) } } @@ -54,7 +57,8 @@ pub fn _mm_sub_ss(a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(subps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_sub_ps(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_sub_ps(a: __m128, b: __m128) -> __m128 { unsafe { simd_sub(a, b) } } @@ -66,7 +70,8 @@ pub fn _mm_sub_ps(a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(mulss))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_mul_ss(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mul_ss(a: __m128, b: __m128) -> __m128 { unsafe { simd_insert!(a, 0, _mm_cvtss_f32(a) * _mm_cvtss_f32(b)) } } @@ -78,7 +83,8 @@ pub fn _mm_mul_ss(a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(mulps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_mul_ps(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mul_ps(a: __m128, b: __m128) -> __m128 { unsafe { simd_mul(a, b) } } @@ -90,7 +96,8 @@ pub fn _mm_mul_ps(a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(divss))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_div_ss(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_div_ss(a: __m128, b: 
__m128) -> __m128 { unsafe { simd_insert!(a, 0, _mm_cvtss_f32(a) / _mm_cvtss_f32(b)) } } @@ -102,7 +109,8 @@ pub fn _mm_div_ss(a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(divps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_div_ps(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_div_ps(a: __m128, b: __m128) -> __m128 { unsafe { simd_div(a, b) } } @@ -241,7 +249,8 @@ pub fn _mm_max_ps(a: __m128, b: __m128) -> __m128 { assert_instr(andps) )] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_and_ps(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_and_ps(a: __m128, b: __m128) -> __m128 { unsafe { let a: __m128i = mem::transmute(a); let b: __m128i = mem::transmute(b); @@ -264,7 +273,8 @@ pub fn _mm_and_ps(a: __m128, b: __m128) -> __m128 { assert_instr(andnps) )] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_andnot_ps(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_andnot_ps(a: __m128, b: __m128) -> __m128 { unsafe { let a: __m128i = mem::transmute(a); let b: __m128i = mem::transmute(b); @@ -284,7 +294,8 @@ pub fn _mm_andnot_ps(a: __m128, b: __m128) -> __m128 { assert_instr(orps) )] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_or_ps(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_or_ps(a: __m128, b: __m128) -> __m128 { unsafe { let a: __m128i = mem::transmute(a); let b: __m128i = mem::transmute(b); @@ -304,7 +315,8 @@ pub fn _mm_or_ps(a: __m128, b: __m128) -> __m128 { assert_instr(xorps) )] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_xor_ps(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_xor_ps(a: __m128, b: __m128) -> __m128 { unsafe { let a: __m128i = mem::transmute(a); let b: __m128i = mem::transmute(b); @@ -866,7 +878,8 @@ pub fn _mm_cvtt_ss2si(a: __m128) -> i32 { // No point in using assert_instrs. In Unix x86_64 calling convention this is a // no-op, and on msvc it's just a `mov`. 
#[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtss_f32(a: __m128) -> f32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtss_f32(a: __m128) -> f32 { unsafe { simd_extract!(a, 0) } } @@ -881,7 +894,8 @@ pub fn _mm_cvtss_f32(a: __m128) -> f32 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(cvtsi2ss))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtsi32_ss(a: __m128, b: i32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtsi32_ss(a: __m128, b: i32) -> __m128 { unsafe { simd_insert!(a, 0, b as f32) } } @@ -904,7 +918,8 @@ pub fn _mm_cvt_si2ss(a: __m128, b: i32) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(movss))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_set_ss(a: f32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set_ss(a: f32) -> __m128 { __m128([a, 0.0, 0.0, 0.0]) } @@ -915,7 +930,8 @@ pub fn _mm_set_ss(a: f32) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(shufps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_set1_ps(a: f32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set1_ps(a: f32) -> __m128 { __m128([a, a, a, a]) } @@ -926,7 +942,8 @@ pub fn _mm_set1_ps(a: f32) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(shufps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_set_ps1(a: f32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set_ps1(a: f32) -> __m128 { _mm_set1_ps(a) } @@ -953,7 +970,8 @@ pub fn _mm_set_ps1(a: f32) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(unpcklps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_set_ps(a: f32, b: f32, c: f32, d: f32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set_ps(a: f32, b: f32, c: f32, d: f32) -> __m128 { __m128([d, c, b, a]) } @@ -979,7 +997,8 @@ pub fn _mm_set_ps(a: f32, b: f32, c: f32, d: f32) -> __m128 { assert_instr(movaps) )] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_setr_ps(a: f32, b: f32, c: f32, d: f32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_setr_ps(a: f32, b: f32, c: f32, d: f32) -> __m128 { __m128([a, b, c, d]) } @@ -990,7 +1009,8 @@ pub fn _mm_setr_ps(a: f32, b: f32, c: f32, d: f32) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(xorps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_setzero_ps() -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_setzero_ps() -> __m128 { const { unsafe { mem::zeroed() } } } @@ -1021,7 +1041,8 @@ pub const fn _MM_SHUFFLE(z: u32, y: u32, x: u32, w: u32) -> i32 { #[cfg_attr(test, assert_instr(shufps, MASK = 3))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_shuffle_ps<const MASK: i32>(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_shuffle_ps<const MASK: i32>(a: __m128, b: __m128) -> __m128 { static_assert_uimm_bits!(MASK, 8); unsafe { simd_shuffle!( @@ -1045,7 +1066,8 @@ pub fn _mm_shuffle_ps<const MASK: i32>(a: __m128, b: __m128) -> __m128 { #[target_feature(enable =
"sse")] #[cfg_attr(test, assert_instr(unpckhps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_unpackhi_ps(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_unpackhi_ps(a: __m128, b: __m128) -> __m128 { unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) } } @@ -1057,7 +1079,8 @@ pub fn _mm_unpackhi_ps(a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(unpcklps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_unpacklo_ps(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_unpacklo_ps(a: __m128, b: __m128) -> __m128 { unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) } } @@ -1069,7 +1092,8 @@ pub fn _mm_unpacklo_ps(a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(movhlps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_movehl_ps(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_movehl_ps(a: __m128, b: __m128) -> __m128 { // TODO; figure why this is a different instruction on msvc? unsafe { simd_shuffle!(a, b, [6, 7, 2, 3]) } } @@ -1082,7 +1106,8 @@ pub fn _mm_movehl_ps(a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(movlhps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_movelh_ps(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_movelh_ps(a: __m128, b: __m128) -> __m128 { unsafe { simd_shuffle!(a, b, [0, 1, 4, 5]) } } @@ -1096,12 +1121,13 @@ pub fn _mm_movelh_ps(a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(movmskps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_movemask_ps(a: __m128) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_movemask_ps(a: __m128) -> i32 { // Propagate the highest bit to the rest, because simd_bitmask // requires all-1 or all-0. 
unsafe { let mask: i32x4 = simd_lt(transmute(a), i32x4::ZERO); - simd_bitmask::<i32x4, u8>(mask).into() + simd_bitmask::<i32x4, u8>(mask) as i32 } } @@ -1115,7 +1141,8 @@ pub fn _mm_movemask_ps(a: __m128) -> i32 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(movss))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_load_ss(p: *const f32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_load_ss(p: *const f32) -> __m128 { __m128([*p, 0.0, 0.0, 0.0]) } @@ -1130,7 +1157,8 @@ pub unsafe fn _mm_load_ss(p: *const f32) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(movss))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_load1_ps(p: *const f32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_load1_ps(p: *const f32) -> __m128 { let a = *p; __m128([a, a, a, a]) } @@ -1142,7 +1170,8 @@ pub unsafe fn _mm_load1_ps(p: *const f32) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(movss))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_load_ps1(p: *const f32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_load_ps1(p: *const f32) -> __m128 { _mm_load1_ps(p) } @@ -1166,7 +1195,8 @@ pub unsafe fn _mm_load_ps1(p: *const f32) -> __m128 { )] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] -pub unsafe fn _mm_load_ps(p: *const f32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_load_ps(p: *const f32) -> __m128 { *(p as *const __m128) } @@ -1183,7 +1213,8 @@ pub unsafe fn _mm_load_ps(p: *const f32) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(movups))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_loadu_ps(p: *const f32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_loadu_ps(p: *const f32) -> __m128 { // Note: Using `*p` would require `f32` alignment, but `movups` has no // alignment restrictions.
let mut dst = _mm_undefined_ps(); @@ -1223,7 +1254,8 @@ pub unsafe fn _mm_loadu_ps(p: *const f32) -> __m128 { assert_instr(movaps) )] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_loadr_ps(p: *const f32) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_loadr_ps(p: *const f32) -> __m128 { let a = _mm_load_ps(p); simd_shuffle!(a, a, [3, 2, 1, 0]) } @@ -1237,7 +1269,8 @@ pub unsafe fn _mm_loadr_ps(p: *const f32) -> __m128 { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(movss))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_store_ss(p: *mut f32, a: __m128) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_store_ss(p: *mut f32, a: __m128) { *p = simd_extract!(a, 0); } @@ -1267,7 +1300,8 @@ pub unsafe fn _mm_store_ss(p: *mut f32, a: __m128) { )] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] -pub unsafe fn _mm_store1_ps(p: *mut f32, a: __m128) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_store1_ps(p: *mut f32, a: __m128) { let b: __m128 = simd_shuffle!(a, a, [0, 0, 0, 0]); *(p as *mut __m128) = b; } @@ -1282,7 +1316,8 @@ pub unsafe fn _mm_store1_ps(p: *mut f32, a: __m128) { assert_instr(movaps) )] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_store_ps1(p: *mut f32, a: __m128) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_store_ps1(p: *mut f32, a: __m128) { _mm_store1_ps(p, a); } @@ -1305,7 +1340,8 @@ pub unsafe fn _mm_store_ps1(p: *mut f32, a: __m128) { )] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] -pub unsafe fn _mm_store_ps(p: *mut f32, a: __m128) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_store_ps(p: *mut f32, a: __m128) { *(p as *mut __m128) = a; } @@ -1320,7 +1356,8 @@ pub unsafe fn _mm_store_ps(p: *mut f32, a: __m128) { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(movups))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_storeu_ps(p: *mut f32, a: __m128) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_storeu_ps(p: *mut f32, a: __m128) { ptr::copy_nonoverlapping( ptr::addr_of!(a) as *const u8, p as *mut u8, @@ -1352,7 +1389,8 @@ pub unsafe fn _mm_storeu_ps(p: *mut f32, a: __m128) { )] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] -pub unsafe fn _mm_storer_ps(p: *mut f32, a: __m128) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_storer_ps(p: *mut f32, a: __m128) { let b: __m128 = simd_shuffle!(a, a, [3, 2, 1, 0]); *(p as *mut __m128) = b; } @@ -1370,7 +1408,8 @@ pub unsafe fn _mm_storer_ps(p: *mut f32, a: __m128) { #[target_feature(enable = "sse")] #[cfg_attr(test, assert_instr(movss))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_move_ss(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_move_ss(a: __m128, b: __m128) -> __m128 { unsafe { simd_shuffle!(a, b, [4, 1, 2, 3]) } } @@ -1917,7 +1956,8 @@ pub fn _mm_prefetch(p: *const i8) { #[inline] #[target_feature(enable = "sse")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_undefined_ps() -> __m128 { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_undefined_ps() -> __m128 { const { unsafe { mem::zeroed() } } } @@ -1928,7 +1968,8 @@ pub fn _mm_undefined_ps() -> __m128 { #[allow(non_snake_case)] #[target_feature(enable = "sse")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _MM_TRANSPOSE4_PS( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _MM_TRANSPOSE4_PS( row0: &mut __m128, row1: &mut __m128, row2: &mut __m128, @@ -2037,6 +2078,7 @@ pub unsafe fn _mm_stream_ps(mem_addr: *mut f32, a: __m128) { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use crate::{hint::black_box, mem::transmute, ptr}; use std::boxed; use stdarch_test::simd_test; @@ -2046,7 +2088,7 @@ mod tests { const NAN: f32 = f32::NAN; #[simd_test(enable = "sse")] - unsafe fn test_mm_add_ps() { + const unsafe fn test_mm_add_ps() { let a = _mm_setr_ps(-1.0, 5.0, 0.0, -10.0); let b = _mm_setr_ps(-100.0, 20.0, 0.0, -5.0); let r = _mm_add_ps(a, b); @@ -2054,7 +2096,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_add_ss() { + const unsafe fn test_mm_add_ss() { let a = _mm_set_ps(-1.0, 5.0, 0.0, -10.0); let b = _mm_set_ps(-100.0, 20.0, 0.0, -5.0); let r = _mm_add_ss(a, b); @@ -2062,7 +2104,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_sub_ps() { + const unsafe fn test_mm_sub_ps() { let a = _mm_setr_ps(-1.0, 5.0, 0.0, -10.0); let b = _mm_setr_ps(-100.0, 20.0, 0.0, -5.0); let r = _mm_sub_ps(a, b); @@ -2070,7 +2112,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_sub_ss() { + const unsafe fn test_mm_sub_ss() { let a = _mm_setr_ps(-1.0, 5.0, 0.0, -10.0); let b = _mm_setr_ps(-100.0, 20.0, 0.0, -5.0); let r = _mm_sub_ss(a, b); @@ -2078,7 +2120,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_mul_ps() { + const unsafe fn test_mm_mul_ps() { let a = _mm_setr_ps(-1.0, 5.0, 0.0, -10.0); let b = _mm_setr_ps(-100.0, 20.0, 0.0, -5.0); let r = _mm_mul_ps(a, b); @@ -2086,7 +2128,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_mul_ss() { + const unsafe fn test_mm_mul_ss() { let a = _mm_setr_ps(-1.0, 5.0, 0.0, -10.0); let b = _mm_setr_ps(-100.0, 20.0, 0.0, -5.0); let r = _mm_mul_ss(a, b); @@ -2094,7 +2136,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_div_ps() { + const unsafe fn test_mm_div_ps() { let a = _mm_setr_ps(-1.0, 5.0, 2.0, -10.0); let b = _mm_setr_ps(-100.0, 20.0, 0.2, -5.0); let r = _mm_div_ps(a, b); @@ -2102,7 +2144,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_div_ss() { + const unsafe fn test_mm_div_ss() { let a = _mm_setr_ps(-1.0, 5.0, 0.0, -10.0); let b = _mm_setr_ps(-100.0, 20.0, 0.0, -5.0); let r = _mm_div_ss(a, b); @@ -2229,7 +2271,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_and_ps() { + const unsafe fn test_mm_and_ps() { let a = transmute(u32x4::splat(0b0011)); let b = transmute(u32x4::splat(0b0101)); let r = _mm_and_ps(*black_box(&a), *black_box(&b)); @@ -2238,7 +2280,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_andnot_ps() { + const unsafe fn test_mm_andnot_ps() { let a = transmute(u32x4::splat(0b0011)); let b = transmute(u32x4::splat(0b0101)); let r = _mm_andnot_ps(*black_box(&a), *black_box(&b)); @@ -2247,7 +2289,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_or_ps() { + const unsafe fn test_mm_or_ps() { let a = transmute(u32x4::splat(0b0011)); let b = transmute(u32x4::splat(0b0101)); let r = 
_mm_or_ps(*black_box(&a), *black_box(&b)); @@ -2256,7 +2298,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_xor_ps() { + const unsafe fn test_mm_xor_ps() { let a = transmute(u32x4::splat(0b0011)); let b = transmute(u32x4::splat(0b0101)); let r = _mm_xor_ps(*black_box(&a), *black_box(&b)); @@ -2979,36 +3021,40 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_cvtsi32_ss() { - let inputs = &[ - (4555i32, 4555.0f32), - (322223333, 322223330.0), - (-432, -432.0), - (-322223333, -322223330.0), - ]; + const unsafe fn test_mm_cvtsi32_ss() { + let a = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); - for &(x, f) in inputs.iter() { - let a = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); - let r = _mm_cvtsi32_ss(a, x); - let e = _mm_setr_ps(f, 6.0, 7.0, 8.0); - assert_eq_m128(e, r); - } + let r = _mm_cvtsi32_ss(a, 4555); + let e = _mm_setr_ps(4555.0, 6.0, 7.0, 8.0); + assert_eq_m128(e, r); + + let r = _mm_cvtsi32_ss(a, 322223333); + let e = _mm_setr_ps(322223333.0, 6.0, 7.0, 8.0); + assert_eq_m128(e, r); + + let r = _mm_cvtsi32_ss(a, -432); + let e = _mm_setr_ps(-432.0, 6.0, 7.0, 8.0); + assert_eq_m128(e, r); + + let r = _mm_cvtsi32_ss(a, -322223333); + let e = _mm_setr_ps(-322223333.0, 6.0, 7.0, 8.0); + assert_eq_m128(e, r); } #[simd_test(enable = "sse")] - unsafe fn test_mm_cvtss_f32() { + const unsafe fn test_mm_cvtss_f32() { let a = _mm_setr_ps(312.0134, 5.0, 6.0, 7.0); assert_eq!(_mm_cvtss_f32(a), 312.0134); } #[simd_test(enable = "sse")] - unsafe fn test_mm_set_ss() { + const unsafe fn test_mm_set_ss() { let r = _mm_set_ss(black_box(4.25)); assert_eq_m128(r, _mm_setr_ps(4.25, 0.0, 0.0, 0.0)); } #[simd_test(enable = "sse")] - unsafe fn test_mm_set1_ps() { + const unsafe fn test_mm_set1_ps() { let r1 = _mm_set1_ps(black_box(4.25)); let r2 = _mm_set_ps1(black_box(4.25)); assert_eq!(get_m128(r1, 0), 4.25); @@ -3022,7 +3068,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_set_ps() { + const unsafe fn test_mm_set_ps() { let r = _mm_set_ps( black_box(1.0), black_box(2.0), @@ -3036,7 +3082,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_setr_ps() { + const unsafe fn test_mm_setr_ps() { let r = _mm_setr_ps( black_box(1.0), black_box(2.0), @@ -3047,21 +3093,21 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_setzero_ps() { + const unsafe fn test_mm_setzero_ps() { let r = *black_box(&_mm_setzero_ps()); assert_eq_m128(r, _mm_set1_ps(0.0)); } - #[test] + #[simd_test] #[allow(non_snake_case)] - fn test_MM_SHUFFLE() { + const fn test_MM_SHUFFLE() { assert_eq!(_MM_SHUFFLE(0, 1, 1, 3), 0b00_01_01_11); assert_eq!(_MM_SHUFFLE(3, 1, 1, 0), 0b11_01_01_00); assert_eq!(_MM_SHUFFLE(1, 2, 2, 1), 0b01_10_10_01); } #[simd_test(enable = "sse")] - unsafe fn test_mm_shuffle_ps() { + const unsafe fn test_mm_shuffle_ps() { let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); let b = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); let r = _mm_shuffle_ps::<0b00_01_01_11>(a, b); @@ -3069,7 +3115,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_unpackhi_ps() { + const unsafe fn test_mm_unpackhi_ps() { let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); let b = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); let r = _mm_unpackhi_ps(a, b); @@ -3077,7 +3123,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_unpacklo_ps() { + const unsafe fn test_mm_unpacklo_ps() { let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); let b = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); let r = _mm_unpacklo_ps(a, b); @@ -3085,7 +3131,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_movehl_ps() { + const 
unsafe fn test_mm_movehl_ps() { let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); let b = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); let r = _mm_movehl_ps(a, b); @@ -3093,7 +3139,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_movelh_ps() { + const unsafe fn test_mm_movelh_ps() { let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); let b = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); let r = _mm_movelh_ps(a, b); @@ -3101,43 +3147,35 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_load_ss() { + const unsafe fn test_mm_load_ss() { let a = 42.0f32; let r = _mm_load_ss(ptr::addr_of!(a)); assert_eq_m128(r, _mm_setr_ps(42.0, 0.0, 0.0, 0.0)); } #[simd_test(enable = "sse")] - unsafe fn test_mm_load1_ps() { + const unsafe fn test_mm_load1_ps() { let a = 42.0f32; let r = _mm_load1_ps(ptr::addr_of!(a)); assert_eq_m128(r, _mm_setr_ps(42.0, 42.0, 42.0, 42.0)); } #[simd_test(enable = "sse")] - unsafe fn test_mm_load_ps() { - let vals = &[1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; - - let mut p = vals.as_ptr(); - let mut fixup = 0.0f32; + const unsafe fn test_mm_load_ps() { + let vals = Memory { + data: [1.0f32, 2.0, 3.0, 4.0], + }; - // Make sure p is aligned, otherwise we might get a - // (signal: 11, SIGSEGV: invalid memory reference) - - let unalignment = (p as usize) & 0xf; - if unalignment != 0 { - let delta = (16 - unalignment) >> 2; - fixup = delta as f32; - p = p.add(delta); - } + // guaranteed to be aligned to 16 bytes + let p = vals.data.as_ptr(); let r = _mm_load_ps(p); - let e = _mm_add_ps(_mm_setr_ps(1.0, 2.0, 3.0, 4.0), _mm_set1_ps(fixup)); + let e = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); assert_eq_m128(r, e); } #[simd_test(enable = "sse")] - unsafe fn test_mm_loadu_ps() { + const unsafe fn test_mm_loadu_ps() { let vals = &[1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; let p = vals.as_ptr().add(3); let r = _mm_loadu_ps(black_box(p)); @@ -3145,29 +3183,21 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_loadr_ps() { - let vals = &[1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; - - let mut p = vals.as_ptr(); - let mut fixup = 0.0f32; + const unsafe fn test_mm_loadr_ps() { + let vals = Memory { + data: [1.0f32, 2.0, 3.0, 4.0], + }; - // Make sure p is aligned, otherwise we might get a - // (signal: 11, SIGSEGV: invalid memory reference) - - let unalignment = (p as usize) & 0xf; - if unalignment != 0 { - let delta = (16 - unalignment) >> 2; - fixup = delta as f32; - p = p.add(delta); - } + // guaranteed to be aligned to 16 bytes + let p = vals.data.as_ptr(); let r = _mm_loadr_ps(p); - let e = _mm_add_ps(_mm_setr_ps(4.0, 3.0, 2.0, 1.0), _mm_set1_ps(fixup)); + let e = _mm_setr_ps(4.0, 3.0, 2.0, 1.0); assert_eq_m128(r, e); } #[simd_test(enable = "sse")] - unsafe fn test_mm_store_ss() { + const unsafe fn test_mm_store_ss() { let mut vals = [0.0f32; 8]; let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); _mm_store_ss(vals.as_mut_ptr().add(1), a); @@ -3178,110 +3208,65 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_store1_ps() { - let mut vals = [0.0f32; 8]; + const unsafe fn test_mm_store1_ps() { + let mut vals = Memory { data: [0.0f32; 4] }; let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); - let mut ofs = 0; - let mut p = vals.as_mut_ptr(); - - if (p as usize) & 0xf != 0 { - ofs = (16 - ((p as usize) & 0xf)) >> 2; - p = p.add(ofs); - } + // guaranteed to be aligned to 16 bytes + let p = vals.data.as_mut_ptr(); _mm_store1_ps(p, *black_box(&a)); - if ofs > 0 { - assert_eq!(vals[ofs - 1], 0.0); - } - assert_eq!(vals[ofs + 0], 1.0); - assert_eq!(vals[ofs + 1], 1.0); - assert_eq!(vals[ofs + 2], 1.0); 
- assert_eq!(vals[ofs + 3], 1.0); - assert_eq!(vals[ofs + 4], 0.0); + assert_eq!(vals.data, [1.0, 1.0, 1.0, 1.0]); } #[simd_test(enable = "sse")] - unsafe fn test_mm_store_ps() { - let mut vals = [0.0f32; 8]; + const unsafe fn test_mm_store_ps() { + let mut vals = Memory { data: [0.0f32; 4] }; let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); - let mut ofs = 0; - let mut p = vals.as_mut_ptr(); - - // Align p to 16-byte boundary - if (p as usize) & 0xf != 0 { - ofs = (16 - ((p as usize) & 0xf)) >> 2; - p = p.add(ofs); - } + // guaranteed to be aligned to 16 bytes + let p = vals.data.as_mut_ptr(); _mm_store_ps(p, *black_box(&a)); - if ofs > 0 { - assert_eq!(vals[ofs - 1], 0.0); - } - assert_eq!(vals[ofs + 0], 1.0); - assert_eq!(vals[ofs + 1], 2.0); - assert_eq!(vals[ofs + 2], 3.0); - assert_eq!(vals[ofs + 3], 4.0); - assert_eq!(vals[ofs + 4], 0.0); + assert_eq!(vals.data, [1.0, 2.0, 3.0, 4.0]); } #[simd_test(enable = "sse")] - unsafe fn test_mm_storer_ps() { - let mut vals = [0.0f32; 8]; + const unsafe fn test_mm_storer_ps() { + let mut vals = Memory { data: [0.0f32; 4] }; let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); - let mut ofs = 0; - let mut p = vals.as_mut_ptr(); - - // Align p to 16-byte boundary - if (p as usize) & 0xf != 0 { - ofs = (16 - ((p as usize) & 0xf)) >> 2; - p = p.add(ofs); - } + // guaranteed to be aligned to 16 bytes + let p = vals.data.as_mut_ptr(); _mm_storer_ps(p, *black_box(&a)); - if ofs > 0 { - assert_eq!(vals[ofs - 1], 0.0); - } - assert_eq!(vals[ofs + 0], 4.0); - assert_eq!(vals[ofs + 1], 3.0); - assert_eq!(vals[ofs + 2], 2.0); - assert_eq!(vals[ofs + 3], 1.0); - assert_eq!(vals[ofs + 4], 0.0); + assert_eq!(vals.data, [4.0, 3.0, 2.0, 1.0]); } #[simd_test(enable = "sse")] - unsafe fn test_mm_storeu_ps() { - let mut vals = [0.0f32; 8]; - let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); + const unsafe fn test_mm_storeu_ps() { + #[repr(align(16))] + struct Memory8 { + data: [f32; 8], + } - let mut ofs = 0; - let mut p = vals.as_mut_ptr(); + // guaranteed to be aligned to 16 bytes + let mut vals = Memory8 { data: [0.0f32; 8] }; + let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); - // Make sure p is **not** aligned to 16-byte boundary - if (p as usize) & 0xf == 0 { - ofs = 1; - p = p.add(1); - } + // guaranteed to be *not* aligned to 16 bytes + let p = vals.data.as_mut_ptr().offset(1); _mm_storeu_ps(p, *black_box(&a)); - if ofs > 0 { - assert_eq!(vals[ofs - 1], 0.0); - } - assert_eq!(vals[ofs + 0], 1.0); - assert_eq!(vals[ofs + 1], 2.0); - assert_eq!(vals[ofs + 2], 3.0); - assert_eq!(vals[ofs + 3], 4.0); - assert_eq!(vals[ofs + 4], 0.0); + assert_eq!(vals.data, [0.0, 1.0, 2.0, 3.0, 4.0, 0.0, 0.0, 0.0]); } #[simd_test(enable = "sse")] - unsafe fn test_mm_move_ss() { + const unsafe fn test_mm_move_ss() { let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); let b = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); @@ -3291,7 +3276,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_movemask_ps() { + const unsafe fn test_mm_movemask_ps() { let r = _mm_movemask_ps(_mm_setr_ps(-1.0, 5.0, -5.0, 0.0)); assert_eq!(r, 0b0101); @@ -3307,7 +3292,7 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_MM_TRANSPOSE4_PS() { + const unsafe fn test_MM_TRANSPOSE4_PS() { let mut a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); let mut b = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); let mut c = _mm_setr_ps(9.0, 10.0, 11.0, 12.0); diff --git a/crates/core_arch/src/x86/sse2.rs b/crates/core_arch/src/x86/sse2.rs index 93fe7a7ae3..098cd67c02 100644 --- a/crates/core_arch/src/x86/sse2.rs +++ b/crates/core_arch/src/x86/sse2.rs @@ -76,7 +76,8 @@ pub fn 
_mm_mfence() { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(paddb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_add_epi8(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_add_epi8(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_add(a.as_i8x16(), b.as_i8x16())) } } @@ -87,7 +88,8 @@ pub fn _mm_add_epi8(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(paddw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_add_epi16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_add_epi16(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_add(a.as_i16x8(), b.as_i16x8())) } } @@ -98,7 +100,8 @@ pub fn _mm_add_epi16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(paddd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_add_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_add_epi32(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_add(a.as_i32x4(), b.as_i32x4())) } } @@ -109,7 +112,8 @@ pub fn _mm_add_epi32(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(paddq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_add_epi64(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_add_epi64(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_add(a.as_i64x2(), b.as_i64x2())) } } @@ -120,7 +124,8 @@ pub fn _mm_add_epi64(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(paddsb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_adds_epi8(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_adds_epi8(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_saturating_add(a.as_i8x16(), b.as_i8x16())) } } @@ -131,7 +136,8 @@ pub fn _mm_adds_epi8(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(paddsw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_adds_epi16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_adds_epi16(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_saturating_add(a.as_i16x8(), b.as_i16x8())) } } @@ -142,7 +148,8 @@ pub fn _mm_adds_epi16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(paddusb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_adds_epu8(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_adds_epu8(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_saturating_add(a.as_u8x16(), b.as_u8x16())) } } @@ -153,7 +160,8 @@ pub fn _mm_adds_epu8(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(paddusw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_adds_epu16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn 
_mm_adds_epu16(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_saturating_add(a.as_u16x8(), b.as_u16x8())) } } @@ -164,7 +172,8 @@ pub fn _mm_adds_epu16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pavgb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_avg_epu8(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_avg_epu8(a: __m128i, b: __m128i) -> __m128i { unsafe { let a = simd_cast::<_, u16x16>(a.as_u8x16()); let b = simd_cast::<_, u16x16>(b.as_u8x16()); @@ -180,7 +189,8 @@ pub fn _mm_avg_epu8(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pavgw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_avg_epu16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_avg_epu16(a: __m128i, b: __m128i) -> __m128i { unsafe { let a = simd_cast::<_, u32x8>(a.as_u16x8()); let b = simd_cast::<_, u32x8>(b.as_u16x8()); @@ -200,7 +210,8 @@ pub fn _mm_avg_epu16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pmaddwd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_madd_epi16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_madd_epi16(a: __m128i, b: __m128i) -> __m128i { unsafe { let r: i32x8 = simd_mul(simd_cast(a.as_i16x8()), simd_cast(b.as_i16x8())); let even: i32x4 = simd_shuffle!(r, r, [0, 2, 4, 6]); @@ -217,7 +228,8 @@ pub fn _mm_madd_epi16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pmaxsw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_max_epi16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_max_epi16(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_imax(a.as_i16x8(), b.as_i16x8()).as_m128i() } } @@ -229,7 +241,8 @@ pub fn _mm_max_epi16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pmaxub))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_max_epu8(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_max_epu8(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_imax(a.as_u8x16(), b.as_u8x16()).as_m128i() } } @@ -241,7 +254,8 @@ pub fn _mm_max_epu8(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pminsw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_min_epi16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_min_epi16(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_imin(a.as_i16x8(), b.as_i16x8()).as_m128i() } } @@ -253,7 +267,8 @@ pub fn _mm_min_epi16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pminub))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_min_epu8(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_min_epu8(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_imin(a.as_u8x16(), b.as_u8x16()).as_m128i() } } @@ -267,7 +282,8 @@ pub fn _mm_min_epu8(a: 
__m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pmulhw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_mulhi_epi16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mulhi_epi16(a: __m128i, b: __m128i) -> __m128i { unsafe { let a = simd_cast::<_, i32x8>(a.as_i16x8()); let b = simd_cast::<_, i32x8>(b.as_i16x8()); @@ -286,7 +302,8 @@ pub fn _mm_mulhi_epi16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pmulhuw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_mulhi_epu16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mulhi_epu16(a: __m128i, b: __m128i) -> __m128i { unsafe { let a = simd_cast::<_, u32x8>(a.as_u16x8()); let b = simd_cast::<_, u32x8>(b.as_u16x8()); @@ -305,7 +322,8 @@ pub fn _mm_mulhi_epu16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pmullw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_mullo_epi16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mullo_epi16(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_mul(a.as_i16x8(), b.as_i16x8())) } } @@ -319,11 +337,12 @@ pub fn _mm_mullo_epi16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pmuludq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_mul_epu32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mul_epu32(a: __m128i, b: __m128i) -> __m128i { unsafe { let a = a.as_u64x2(); let b = b.as_u64x2(); - let mask = u64x2::splat(u32::MAX.into()); + let mask = u64x2::splat(u32::MAX as u64); transmute(simd_mul(simd_and(a, mask), simd_and(b, mask))) } } @@ -351,7 +370,8 @@ pub fn _mm_sad_epu8(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(psubb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_sub_epi8(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_sub_epi8(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_sub(a.as_i8x16(), b.as_i8x16())) } } @@ -362,7 +382,8 @@ pub fn _mm_sub_epi8(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(psubw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_sub_epi16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_sub_epi16(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_sub(a.as_i16x8(), b.as_i16x8())) } } @@ -373,7 +394,8 @@ pub fn _mm_sub_epi16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(psubd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_sub_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_sub_epi32(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_sub(a.as_i32x4(), b.as_i32x4())) } } @@ -384,7 +406,8 @@ pub fn _mm_sub_epi32(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, 
assert_instr(psubq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_sub_epi64(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_sub_epi64(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_sub(a.as_i64x2(), b.as_i64x2())) } } @@ -396,7 +419,8 @@ pub fn _mm_sub_epi64(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(psubsb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_subs_epi8(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_subs_epi8(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_saturating_sub(a.as_i8x16(), b.as_i8x16())) } } @@ -408,7 +432,8 @@ pub fn _mm_subs_epi8(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(psubsw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_subs_epi16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_subs_epi16(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_saturating_sub(a.as_i16x8(), b.as_i16x8())) } } @@ -420,7 +445,8 @@ pub fn _mm_subs_epi16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(psubusb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_subs_epu8(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_subs_epu8(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_saturating_sub(a.as_u8x16(), b.as_u8x16())) } } @@ -432,7 +458,8 @@ pub fn _mm_subs_epu8(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(psubusw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_subs_epu16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_subs_epu16(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_saturating_sub(a.as_u16x8(), b.as_u16x8())) } } @@ -444,7 +471,8 @@ pub fn _mm_subs_epu16(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pslldq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_slli_si128(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_slli_si128(a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); unsafe { _mm_slli_si128_impl::(a) } } @@ -453,7 +481,8 @@ pub fn _mm_slli_si128(a: __m128i) -> __m128i { /// `_mm_slli_si128` intrinsic into a compile-time constant. 
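// The _mm_subs_* intrinsics above subtract with saturation: signed lanes clamp
// to the i8/i16 range instead of wrapping, unsigned lanes clamp at zero.
// Scalar sketch for one unsigned byte lane (helper name is illustrative):
fn subs_epu8_lane(a: u8, b: u8) -> u8 {
    a.saturating_sub(b) // e.g. 2 - 5 saturates to 0 rather than wrapping
}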
#[inline] #[target_feature(enable = "sse2")] -unsafe fn _mm_slli_si128_impl(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +const unsafe fn _mm_slli_si128_impl(a: __m128i) -> __m128i { const fn mask(shift: i32, i: u32) -> u32 { let shift = shift as u32 & 0xff; if shift > 15 { i } else { 16 - shift + i } @@ -490,7 +519,8 @@ unsafe fn _mm_slli_si128_impl(a: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pslldq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_bslli_si128(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_bslli_si128(a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); _mm_slli_si128_impl::(a) @@ -505,7 +535,8 @@ pub fn _mm_bslli_si128(a: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(psrldq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_bsrli_si128(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_bsrli_si128(a: __m128i) -> __m128i { unsafe { static_assert_uimm_bits!(IMM8, 8); _mm_srli_si128_impl::(a) @@ -520,7 +551,8 @@ pub fn _mm_bsrli_si128(a: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(psllw, IMM8 = 7))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_slli_epi16(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_slli_epi16(a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); unsafe { if IMM8 >= 16 { @@ -551,7 +583,8 @@ pub fn _mm_sll_epi16(a: __m128i, count: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pslld, IMM8 = 7))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_slli_epi32(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_slli_epi32(a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); unsafe { if IMM8 >= 32 { @@ -582,7 +615,8 @@ pub fn _mm_sll_epi32(a: __m128i, count: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(psllq, IMM8 = 7))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_slli_epi64(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_slli_epi64(a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); unsafe { if IMM8 >= 64 { @@ -614,7 +648,8 @@ pub fn _mm_sll_epi64(a: __m128i, count: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(psraw, IMM8 = 1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_srai_epi16(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_srai_epi16(a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); unsafe { transmute(simd_shr(a.as_i16x8(), i16x8::splat(IMM8.min(15) as i16))) } } @@ -640,7 +675,8 @@ pub fn _mm_sra_epi16(a: __m128i, count: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(psrad, IMM8 = 1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_srai_epi32(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_srai_epi32(a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); unsafe { 
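// Two out-of-range conventions are visible in the shift intrinsics above: the
// logical shifts return an all-zero vector once IMM8 reaches the lane width,
// while the arithmetic right shifts clamp the count (IMM8.min(15)) so every
// bit of the lane becomes a copy of the sign bit. Scalar sketch of one i16
// lane (helper name is illustrative):
fn shift16(x: i16, imm8: i32) -> (u16, i16) {
    let logical = if imm8 >= 16 { 0 } else { (x as u16) << imm8 };
    let arithmetic = x >> if imm8 > 15 { 15 } else { imm8 };
    (logical, arithmetic)
}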
transmute(simd_shr(a.as_i32x4(), i32x4::splat(IMM8.min(31)))) } } @@ -665,7 +701,8 @@ pub fn _mm_sra_epi32(a: __m128i, count: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(psrldq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_srli_si128(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_srli_si128(a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); unsafe { _mm_srli_si128_impl::(a) } } @@ -674,7 +711,8 @@ pub fn _mm_srli_si128(a: __m128i) -> __m128i { /// `_mm_srli_si128` intrinsic into a compile-time constant. #[inline] #[target_feature(enable = "sse2")] -unsafe fn _mm_srli_si128_impl(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +const unsafe fn _mm_srli_si128_impl(a: __m128i) -> __m128i { const fn mask(shift: i32, i: u32) -> u32 { if (shift as u32) > 15 { i + 16 @@ -716,7 +754,8 @@ unsafe fn _mm_srli_si128_impl(a: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(psrlw, IMM8 = 1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_srli_epi16(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_srli_epi16(a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); unsafe { if IMM8 >= 16 { @@ -748,7 +787,8 @@ pub fn _mm_srl_epi16(a: __m128i, count: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(psrld, IMM8 = 8))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_srli_epi32(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_srli_epi32(a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); unsafe { if IMM8 >= 32 { @@ -780,7 +820,8 @@ pub fn _mm_srl_epi32(a: __m128i, count: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(psrlq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_srli_epi64(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_srli_epi64(a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); unsafe { if IMM8 >= 64 { @@ -811,7 +852,8 @@ pub fn _mm_srl_epi64(a: __m128i, count: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(andps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_and_si128(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_and_si128(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_and(a, b) } } @@ -823,7 +865,8 @@ pub fn _mm_and_si128(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(andnps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_andnot_si128(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_andnot_si128(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_and(simd_xor(_mm_set1_epi8(-1), a), b) } } @@ -835,7 +878,8 @@ pub fn _mm_andnot_si128(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(orps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_or_si128(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub 
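// _mm_andnot_si128 above computes `!a & b`; since there is no vector NOT, the
// complement is taken by xoring `a` with an all-ones vector (_mm_set1_epi8(-1)).
// Scalar sketch of the identity (helper name is illustrative):
fn andnot(a: u32, b: u32) -> u32 {
    (a ^ u32::MAX) & b // == !a & b
}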
const fn _mm_or_si128(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_or(a, b) } } @@ -847,7 +891,8 @@ pub fn _mm_or_si128(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(xorps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_xor_si128(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_xor_si128(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_xor(a, b) } } @@ -858,7 +903,8 @@ pub fn _mm_xor_si128(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pcmpeqb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cmpeq_epi8(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpeq_epi8(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute::(simd_eq(a.as_i8x16(), b.as_i8x16())) } } @@ -869,7 +915,8 @@ pub fn _mm_cmpeq_epi8(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pcmpeqw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cmpeq_epi16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpeq_epi16(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute::(simd_eq(a.as_i16x8(), b.as_i16x8())) } } @@ -880,7 +927,8 @@ pub fn _mm_cmpeq_epi16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pcmpeqd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cmpeq_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpeq_epi32(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute::(simd_eq(a.as_i32x4(), b.as_i32x4())) } } @@ -891,7 +939,8 @@ pub fn _mm_cmpeq_epi32(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pcmpgtb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cmpgt_epi8(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpgt_epi8(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute::(simd_gt(a.as_i8x16(), b.as_i8x16())) } } @@ -902,7 +951,8 @@ pub fn _mm_cmpgt_epi8(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pcmpgtw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cmpgt_epi16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpgt_epi16(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute::(simd_gt(a.as_i16x8(), b.as_i16x8())) } } @@ -913,7 +963,8 @@ pub fn _mm_cmpgt_epi16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pcmpgtd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cmpgt_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpgt_epi32(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute::(simd_gt(a.as_i32x4(), b.as_i32x4())) } } @@ -924,7 +975,8 @@ pub fn _mm_cmpgt_epi32(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pcmpgtb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn 
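// The integer compare intrinsics above return a per-lane mask rather than a
// boolean: a lane that satisfies the predicate becomes all ones (-1 as a
// signed lane value), any other lane becomes zero. Scalar sketch for a 16-bit
// lane (helper name is illustrative):
fn cmpeq_lane(a: i16, b: i16) -> i16 {
    if a == b { -1 } else { 0 }
}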
_mm_cmplt_epi8(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmplt_epi8(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute::(simd_lt(a.as_i8x16(), b.as_i8x16())) } } @@ -935,7 +987,8 @@ pub fn _mm_cmplt_epi8(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pcmpgtw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cmplt_epi16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmplt_epi16(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute::(simd_lt(a.as_i16x8(), b.as_i16x8())) } } @@ -946,7 +999,8 @@ pub fn _mm_cmplt_epi16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pcmpgtd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cmplt_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmplt_epi32(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute::(simd_lt(a.as_i32x4(), b.as_i32x4())) } } @@ -958,7 +1012,8 @@ pub fn _mm_cmplt_epi32(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(cvtdq2pd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtepi32_pd(a: __m128i) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtepi32_pd(a: __m128i) -> __m128d { unsafe { let a = a.as_i32x4(); simd_cast::(simd_shuffle!(a, a, [0, 1])) @@ -973,7 +1028,8 @@ pub fn _mm_cvtepi32_pd(a: __m128i) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(cvtsi2sd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtsi32_sd(a: __m128d, b: i32) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtsi32_sd(a: __m128d, b: i32) -> __m128d { unsafe { simd_insert!(a, 0, b as f64) } } @@ -985,7 +1041,8 @@ pub fn _mm_cvtsi32_sd(a: __m128d, b: i32) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(cvtdq2ps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtepi32_ps(a: __m128i) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtepi32_ps(a: __m128i) -> __m128 { unsafe { transmute(simd_cast::<_, f32x4>(a.as_i32x4())) } } @@ -1008,7 +1065,8 @@ pub fn _mm_cvtps_epi32(a: __m128) -> __m128i { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtsi32_si128(a: i32) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtsi32_si128(a: i32) -> __m128i { unsafe { transmute(i32x4::new(a, 0, 0, 0)) } } @@ -1018,7 +1076,8 @@ pub fn _mm_cvtsi32_si128(a: i32) -> __m128i { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtsi128_si32(a: __m128i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtsi128_si32(a: __m128i) -> i32 { unsafe { simd_extract!(a.as_i32x4(), 0) } } @@ -1030,7 +1089,8 @@ pub fn _mm_cvtsi128_si32(a: __m128i) -> i32 { #[target_feature(enable = "sse2")] // no particular instruction to test #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_set_epi64x(e1: i64, e0: i64) -> __m128i { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set_epi64x(e1: i64, e0: i64) -> __m128i { unsafe { transmute(i64x2::new(e0, e1)) } } @@ -1041,7 +1101,8 @@ pub fn _mm_set_epi64x(e1: i64, e0: i64) -> __m128i { #[target_feature(enable = "sse2")] // no particular instruction to test #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_set_epi32(e3: i32, e2: i32, e1: i32, e0: i32) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set_epi32(e3: i32, e2: i32, e1: i32, e0: i32) -> __m128i { unsafe { transmute(i32x4::new(e0, e1, e2, e3)) } } @@ -1052,7 +1113,8 @@ pub fn _mm_set_epi32(e3: i32, e2: i32, e1: i32, e0: i32) -> __m128i { #[target_feature(enable = "sse2")] // no particular instruction to test #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_set_epi16( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set_epi16( e7: i16, e6: i16, e5: i16, @@ -1072,7 +1134,8 @@ pub fn _mm_set_epi16( #[target_feature(enable = "sse2")] // no particular instruction to test #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_set_epi8( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set_epi8( e15: i8, e14: i8, e13: i8, @@ -1105,7 +1168,8 @@ pub fn _mm_set_epi8( #[target_feature(enable = "sse2")] // no particular instruction to test #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_set1_epi64x(a: i64) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set1_epi64x(a: i64) -> __m128i { _mm_set_epi64x(a, a) } @@ -1116,7 +1180,8 @@ pub fn _mm_set1_epi64x(a: i64) -> __m128i { #[target_feature(enable = "sse2")] // no particular instruction to test #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_set1_epi32(a: i32) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set1_epi32(a: i32) -> __m128i { _mm_set_epi32(a, a, a, a) } @@ -1127,7 +1192,8 @@ pub fn _mm_set1_epi32(a: i32) -> __m128i { #[target_feature(enable = "sse2")] // no particular instruction to test #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_set1_epi16(a: i16) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set1_epi16(a: i16) -> __m128i { _mm_set_epi16(a, a, a, a, a, a, a, a) } @@ -1138,7 +1204,8 @@ pub fn _mm_set1_epi16(a: i16) -> __m128i { #[target_feature(enable = "sse2")] // no particular instruction to test #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_set1_epi8(a: i8) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set1_epi8(a: i8) -> __m128i { _mm_set_epi8(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a) } @@ -1149,7 +1216,8 @@ pub fn _mm_set1_epi8(a: i8) -> __m128i { #[target_feature(enable = "sse2")] // no particular instruction to test #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_setr_epi32(e3: i32, e2: i32, e1: i32, e0: i32) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_setr_epi32(e3: i32, e2: i32, e1: i32, e0: i32) -> __m128i { _mm_set_epi32(e0, e1, e2, e3) } @@ -1160,7 +1228,8 @@ pub fn _mm_setr_epi32(e3: i32, e2: i32, e1: i32, e0: i32) -> __m128i { #[target_feature(enable = "sse2")] // no particular instruction to test #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn 
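// The constructors above differ only in argument order: the _mm_set_* family
// takes the highest element first (the last argument lands in lane 0), while
// the _mm_setr_* family takes lane 0 first. Scalar sketch of the two layouts
// (helper name is illustrative):
fn set_vs_setr_epi32(args: [i32; 4]) -> ([i32; 4], [i32; 4]) {
    // both intrinsics called as f(args[0], args[1], args[2], args[3])
    let set = [args[3], args[2], args[1], args[0]]; // _mm_set_epi32: reversed into lanes
    let setr = args;                                // _mm_setr_epi32: lanes as given
    (set, setr)
}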
_mm_setr_epi16( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_setr_epi16( e7: i16, e6: i16, e5: i16, @@ -1180,7 +1249,8 @@ pub fn _mm_setr_epi16( #[target_feature(enable = "sse2")] // no particular instruction to test #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_setr_epi8( +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_setr_epi8( e15: i8, e14: i8, e13: i8, @@ -1211,7 +1281,8 @@ pub fn _mm_setr_epi8( #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(xorps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_setzero_si128() -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_setzero_si128() -> __m128i { const { unsafe { mem::zeroed() } } } @@ -1221,7 +1292,8 @@ pub fn _mm_setzero_si128() -> __m128i { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_loadl_epi64(mem_addr: *const __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_loadl_epi64(mem_addr: *const __m128i) -> __m128i { _mm_set_epi64x(0, ptr::read_unaligned(mem_addr as *const i64)) } @@ -1237,7 +1309,8 @@ pub unsafe fn _mm_loadl_epi64(mem_addr: *const __m128i) -> __m128i { assert_instr(movaps) )] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_load_si128(mem_addr: *const __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_load_si128(mem_addr: *const __m128i) -> __m128i { *mem_addr } @@ -1250,7 +1323,8 @@ pub unsafe fn _mm_load_si128(mem_addr: *const __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(movups))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_loadu_si128(mem_addr: *const __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_loadu_si128(mem_addr: *const __m128i) -> __m128i { let mut dst: __m128i = _mm_undefined_si128(); ptr::copy_nonoverlapping( mem_addr as *const u8, @@ -1299,7 +1373,8 @@ pub unsafe fn _mm_maskmoveu_si128(a: __m128i, mask: __m128i, mem_addr: *mut i8) assert_instr(movaps) )] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_store_si128(mem_addr: *mut __m128i, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_store_si128(mem_addr: *mut __m128i, a: __m128i) { *mem_addr = a; } @@ -1312,7 +1387,8 @@ pub unsafe fn _mm_store_si128(mem_addr: *mut __m128i, a: __m128i) { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(movups))] // FIXME movdqu expected #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_storeu_si128(mem_addr: *mut __m128i, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_storeu_si128(mem_addr: *mut __m128i, a: __m128i) { mem_addr.write_unaligned(a); } @@ -1324,7 +1400,8 @@ pub unsafe fn _mm_storeu_si128(mem_addr: *mut __m128i, a: __m128i) { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_storel_epi64(mem_addr: *mut __m128i, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_storel_epi64(mem_addr: *mut __m128i, a: __m128i) { ptr::copy_nonoverlapping(ptr::addr_of!(a) as 
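// _mm_setzero_si128 above keeps its body as an inline `const { .. }` block, so
// the all-zero vector is computed at compile time regardless of where the call
// happens. The same pattern in miniature (helper name is illustrative):
fn zeroed_bytes() -> [u8; 16] {
    const { unsafe { core::mem::zeroed() } }
}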
*const u8, mem_addr as *mut u8, 8); } @@ -1393,7 +1470,8 @@ pub unsafe fn _mm_stream_si32(mem_addr: *mut i32, a: i32) { // FIXME movd on msvc, movd on i686 #[cfg_attr(all(test, target_arch = "x86_64"), assert_instr(movq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_move_epi64(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_move_epi64(a: __m128i) -> __m128i { unsafe { let r: i64x2 = simd_shuffle!(a.as_i64x2(), i64x2::ZERO, [0, 2]); transmute(r) @@ -1444,7 +1522,8 @@ pub fn _mm_packus_epi16(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pextrw, IMM8 = 7))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_extract_epi16(a: __m128i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_extract_epi16(a: __m128i) -> i32 { static_assert_uimm_bits!(IMM8, 3); unsafe { simd_extract!(a.as_u16x8(), IMM8 as u32, u16) as i32 } } @@ -1457,7 +1536,8 @@ pub fn _mm_extract_epi16(a: __m128i) -> i32 { #[cfg_attr(test, assert_instr(pinsrw, IMM8 = 7))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_insert_epi16(a: __m128i, i: i32) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_insert_epi16(a: __m128i, i: i32) -> __m128i { static_assert_uimm_bits!(IMM8, 3); unsafe { transmute(simd_insert!(a.as_i16x8(), IMM8 as u32, i as i16)) } } @@ -1469,7 +1549,8 @@ pub fn _mm_insert_epi16(a: __m128i, i: i32) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(pmovmskb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_movemask_epi8(a: __m128i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_movemask_epi8(a: __m128i) -> i32 { unsafe { let z = i8x16::ZERO; let m: i8x16 = simd_lt(a.as_i8x16(), z); @@ -1485,7 +1566,8 @@ pub fn _mm_movemask_epi8(a: __m128i) -> i32 { #[cfg_attr(test, assert_instr(pshufd, IMM8 = 9))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_shuffle_epi32(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_shuffle_epi32(a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); unsafe { let a = a.as_i32x4(); @@ -1515,7 +1597,8 @@ pub fn _mm_shuffle_epi32(a: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pshufhw, IMM8 = 9))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_shufflehi_epi16(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_shufflehi_epi16(a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); unsafe { let a = a.as_i16x8(); @@ -1549,7 +1632,8 @@ pub fn _mm_shufflehi_epi16(a: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pshuflw, IMM8 = 9))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_shufflelo_epi16(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_shufflelo_epi16(a: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); unsafe { let a = a.as_i16x8(); @@ -1578,7 +1662,8 @@ pub fn _mm_shufflelo_epi16(a: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(punpckhbw))] #[stable(feature = "simd_x86", since = 
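// _mm_movemask_epi8 above gathers the sign bit of each of the 16 byte lanes
// into the low 16 bits of an i32 (lane 0 maps to bit 0). Scalar sketch of the
// same reduction (helper name is illustrative):
const fn movemask_bytes(bytes: [i8; 16]) -> i32 {
    let mut mask = 0;
    let mut i = 0;
    while i < 16 {
        mask |= ((bytes[i] < 0) as i32) << i;
        i += 1;
    }
    mask
}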
"1.27.0")] -pub fn _mm_unpackhi_epi8(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_unpackhi_epi8(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute::(simd_shuffle!( a.as_i8x16(), @@ -1595,7 +1680,8 @@ pub fn _mm_unpackhi_epi8(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(punpckhwd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_unpackhi_epi16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_unpackhi_epi16(a: __m128i, b: __m128i) -> __m128i { unsafe { let x = simd_shuffle!(a.as_i16x8(), b.as_i16x8(), [4, 12, 5, 13, 6, 14, 7, 15]); transmute::(x) @@ -1609,7 +1695,8 @@ pub fn _mm_unpackhi_epi16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(unpckhps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_unpackhi_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_unpackhi_epi32(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute::(simd_shuffle!(a.as_i32x4(), b.as_i32x4(), [2, 6, 3, 7])) } } @@ -1620,7 +1707,8 @@ pub fn _mm_unpackhi_epi32(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(unpckhpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_unpackhi_epi64(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_unpackhi_epi64(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute::(simd_shuffle!(a.as_i64x2(), b.as_i64x2(), [1, 3])) } } @@ -1631,7 +1719,8 @@ pub fn _mm_unpackhi_epi64(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(punpcklbw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_unpacklo_epi8(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_unpacklo_epi8(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute::(simd_shuffle!( a.as_i8x16(), @@ -1648,7 +1737,8 @@ pub fn _mm_unpacklo_epi8(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(punpcklwd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_unpacklo_epi16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_unpacklo_epi16(a: __m128i, b: __m128i) -> __m128i { unsafe { let x = simd_shuffle!(a.as_i16x8(), b.as_i16x8(), [0, 8, 1, 9, 2, 10, 3, 11]); transmute::(x) @@ -1662,7 +1752,8 @@ pub fn _mm_unpacklo_epi16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(unpcklps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_unpacklo_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_unpacklo_epi32(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute::(simd_shuffle!(a.as_i32x4(), b.as_i32x4(), [0, 4, 1, 5])) } } @@ -1673,7 +1764,8 @@ pub fn _mm_unpacklo_epi32(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(movlhps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_unpacklo_epi64(a: __m128i, b: __m128i) -> 
__m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_unpacklo_epi64(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute::(simd_shuffle!(a.as_i64x2(), b.as_i64x2(), [0, 2])) } } @@ -1685,7 +1777,8 @@ pub fn _mm_unpacklo_epi64(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(addsd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_add_sd(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_add_sd(a: __m128d, b: __m128d) -> __m128d { unsafe { simd_insert!(a, 0, _mm_cvtsd_f64(a) + _mm_cvtsd_f64(b)) } } @@ -1697,7 +1790,8 @@ pub fn _mm_add_sd(a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(addpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_add_pd(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_add_pd(a: __m128d, b: __m128d) -> __m128d { unsafe { simd_add(a, b) } } @@ -1709,7 +1803,8 @@ pub fn _mm_add_pd(a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(divsd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_div_sd(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_div_sd(a: __m128d, b: __m128d) -> __m128d { unsafe { simd_insert!(a, 0, _mm_cvtsd_f64(a) / _mm_cvtsd_f64(b)) } } @@ -1721,7 +1816,8 @@ pub fn _mm_div_sd(a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(divpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_div_pd(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_div_pd(a: __m128d, b: __m128d) -> __m128d { unsafe { simd_div(a, b) } } @@ -1781,7 +1877,8 @@ pub fn _mm_min_pd(a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(mulsd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_mul_sd(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mul_sd(a: __m128d, b: __m128d) -> __m128d { unsafe { simd_insert!(a, 0, _mm_cvtsd_f64(a) * _mm_cvtsd_f64(b)) } } @@ -1793,7 +1890,8 @@ pub fn _mm_mul_sd(a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(mulpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_mul_pd(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mul_pd(a: __m128d, b: __m128d) -> __m128d { unsafe { simd_mul(a, b) } } @@ -1828,7 +1926,8 @@ pub fn _mm_sqrt_pd(a: __m128d) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(subsd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_sub_sd(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_sub_sd(a: __m128d, b: __m128d) -> __m128d { unsafe { simd_insert!(a, 0, _mm_cvtsd_f64(a) - _mm_cvtsd_f64(b)) } } @@ -1840,7 +1939,8 @@ pub fn _mm_sub_sd(a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(subpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn 
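// The *_sd arithmetic above is scalar: only lane 0 participates, and lane 1 of
// `a` is carried through unchanged (simd_insert! writes the scalar result back
// into `a`). Scalar sketch (helper name is illustrative):
fn add_sd_lanes(a: [f64; 2], b: [f64; 2]) -> [f64; 2] {
    [a[0] + b[0], a[1]]
}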
_mm_sub_pd(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_sub_pd(a: __m128d, b: __m128d) -> __m128d { unsafe { simd_sub(a, b) } } @@ -1852,7 +1952,8 @@ pub fn _mm_sub_pd(a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(andps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_and_pd(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_and_pd(a: __m128d, b: __m128d) -> __m128d { unsafe { let a: __m128i = transmute(a); let b: __m128i = transmute(b); @@ -1867,7 +1968,8 @@ pub fn _mm_and_pd(a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(andnps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_andnot_pd(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_andnot_pd(a: __m128d, b: __m128d) -> __m128d { unsafe { let a: __m128i = transmute(a); let b: __m128i = transmute(b); @@ -1882,7 +1984,8 @@ pub fn _mm_andnot_pd(a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(orps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_or_pd(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_or_pd(a: __m128d, b: __m128d) -> __m128d { unsafe { let a: __m128i = transmute(a); let b: __m128i = transmute(b); @@ -1897,7 +2000,8 @@ pub fn _mm_or_pd(a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(xorps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_xor_pd(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_xor_pd(a: __m128d, b: __m128d) -> __m128d { unsafe { let a: __m128i = transmute(a); let b: __m128i = transmute(b); @@ -2325,7 +2429,8 @@ pub fn _mm_ucomineq_sd(a: __m128d, b: __m128d) -> i32 { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(cvtpd2ps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtpd_ps(a: __m128d) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtpd_ps(a: __m128d) -> __m128 { unsafe { let r = simd_cast::<_, f32x2>(a.as_f64x2()); let zero = f32x2::ZERO; @@ -2342,7 +2447,8 @@ pub fn _mm_cvtpd_ps(a: __m128d) -> __m128 { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(cvtps2pd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtps_pd(a: __m128) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtps_pd(a: __m128) -> __m128d { unsafe { let a = a.as_f32x4(); transmute(simd_cast::(simd_shuffle!(a, a, [0, 1]))) @@ -2393,7 +2499,8 @@ pub fn _mm_cvtsd_ss(a: __m128, b: __m128d) -> __m128 { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtsd_f64(a: __m128d) -> f64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtsd_f64(a: __m128d) -> f64 { unsafe { simd_extract!(a, 0) } } @@ -2407,7 +2514,8 @@ pub fn _mm_cvtsd_f64(a: __m128d) -> f64 { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(cvtss2sd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn 
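// The _pd bitwise ops above transmute to __m128i first because f64 lanes have
// no bitwise operators; the integer bit patterns are combined and transmuted
// back. Scalar sketch of the same trick (helper name is illustrative):
fn and_f64_bits(a: f64, b: f64) -> f64 {
    f64::from_bits(a.to_bits() & b.to_bits())
}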
_mm_cvtss_sd(a: __m128d, b: __m128) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtss_sd(a: __m128d, b: __m128) -> __m128d { unsafe { let elt: f32 = simd_extract!(b, 0); simd_insert!(a, 0, elt as f64) @@ -2457,7 +2565,8 @@ pub fn _mm_cvttps_epi32(a: __m128) -> __m128i { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_set_sd(a: f64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set_sd(a: f64) -> __m128d { _mm_set_pd(0.0, a) } @@ -2468,7 +2577,8 @@ pub fn _mm_set_sd(a: f64) -> __m128d { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_set1_pd(a: f64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set1_pd(a: f64) -> __m128d { _mm_set_pd(a, a) } @@ -2479,7 +2589,8 @@ pub fn _mm_set1_pd(a: f64) -> __m128d { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_set_pd1(a: f64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set_pd1(a: f64) -> __m128d { _mm_set_pd(a, a) } @@ -2490,7 +2601,8 @@ pub fn _mm_set_pd1(a: f64) -> __m128d { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_set_pd(a: f64, b: f64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_set_pd(a: f64, b: f64) -> __m128d { __m128d([b, a]) } @@ -2501,7 +2613,8 @@ pub fn _mm_set_pd(a: f64, b: f64) -> __m128d { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_setr_pd(a: f64, b: f64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_setr_pd(a: f64, b: f64) -> __m128d { _mm_set_pd(b, a) } @@ -2513,7 +2626,8 @@ pub fn _mm_setr_pd(a: f64, b: f64) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(xorp))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_setzero_pd() -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_setzero_pd() -> __m128d { const { unsafe { mem::zeroed() } } } @@ -2527,12 +2641,13 @@ pub fn _mm_setzero_pd() -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(movmskpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_movemask_pd(a: __m128d) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_movemask_pd(a: __m128d) -> i32 { // Propagate the highest bit to the rest, because simd_bitmask // requires all-1 or all-0. 
unsafe { let mask: i64x2 = simd_lt(transmute(a), i64x2::ZERO); - simd_bitmask::(mask).into() + simd_bitmask::(mask) as i32 } } @@ -2550,7 +2665,8 @@ pub fn _mm_movemask_pd(a: __m128d) -> i32 { )] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] -pub unsafe fn _mm_load_pd(mem_addr: *const f64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_load_pd(mem_addr: *const f64) -> __m128d { *(mem_addr as *const __m128d) } @@ -2562,7 +2678,8 @@ pub unsafe fn _mm_load_pd(mem_addr: *const f64) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(movsd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_load_sd(mem_addr: *const f64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_load_sd(mem_addr: *const f64) -> __m128d { _mm_setr_pd(*mem_addr, 0.) } @@ -2575,7 +2692,8 @@ pub unsafe fn _mm_load_sd(mem_addr: *const f64) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(movhps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_loadh_pd(a: __m128d, mem_addr: *const f64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_loadh_pd(a: __m128d, mem_addr: *const f64) -> __m128d { _mm_setr_pd(simd_extract!(a, 0), *mem_addr) } @@ -2588,7 +2706,8 @@ pub unsafe fn _mm_loadh_pd(a: __m128d, mem_addr: *const f64) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(movlps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_loadl_pd(a: __m128d, mem_addr: *const f64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_loadl_pd(a: __m128d, mem_addr: *const f64) -> __m128d { _mm_setr_pd(*mem_addr, simd_extract!(a, 1)) } @@ -2630,7 +2749,8 @@ pub unsafe fn _mm_stream_pd(mem_addr: *mut f64, a: __m128d) { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(movlps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_store_sd(mem_addr: *mut f64, a: __m128d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_store_sd(mem_addr: *mut f64, a: __m128d) { *mem_addr = simd_extract!(a, 0) } @@ -2647,7 +2767,8 @@ pub unsafe fn _mm_store_sd(mem_addr: *mut f64, a: __m128d) { )] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] -pub unsafe fn _mm_store_pd(mem_addr: *mut f64, a: __m128d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_store_pd(mem_addr: *mut f64, a: __m128d) { *(mem_addr as *mut __m128d) = a; } @@ -2660,7 +2781,8 @@ pub unsafe fn _mm_store_pd(mem_addr: *mut f64, a: __m128d) { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(movups))] // FIXME movupd expected #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_storeu_pd(mem_addr: *mut f64, a: __m128d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_storeu_pd(mem_addr: *mut f64, a: __m128d) { mem_addr.cast::<__m128d>().write_unaligned(a); } @@ -2672,7 +2794,8 @@ pub unsafe fn _mm_storeu_pd(mem_addr: *mut f64, a: __m128d) { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86_updates", since = "1.82.0")] -pub unsafe fn _mm_storeu_si16(mem_addr: *mut u8, a: __m128i) { 
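// The `simd_bitmask(..).into()` -> `as i32` change in _mm_movemask_pd above is
// the same const-compatibility substitution as the mask change in
// _mm_mul_epu32: `.into()` is a trait call and cannot be used in a `const fn`,
// while widening the small bitmask with an `as` cast can (illustrative helper):
const fn widen_mask(m: u8) -> i32 {
    m as i32 // `m.into()` would not compile here
}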
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_storeu_si16(mem_addr: *mut u8, a: __m128i) { ptr::write_unaligned(mem_addr as *mut i16, simd_extract(a.as_i16x8(), 0)) } @@ -2684,7 +2807,8 @@ pub unsafe fn _mm_storeu_si16(mem_addr: *mut u8, a: __m128i) { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86_updates", since = "1.82.0")] -pub unsafe fn _mm_storeu_si32(mem_addr: *mut u8, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_storeu_si32(mem_addr: *mut u8, a: __m128i) { ptr::write_unaligned(mem_addr as *mut i32, simd_extract(a.as_i32x4(), 0)) } @@ -2696,7 +2820,8 @@ pub unsafe fn _mm_storeu_si32(mem_addr: *mut u8, a: __m128i) { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86_updates", since = "1.82.0")] -pub unsafe fn _mm_storeu_si64(mem_addr: *mut u8, a: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_storeu_si64(mem_addr: *mut u8, a: __m128i) { ptr::write_unaligned(mem_addr as *mut i64, simd_extract(a.as_i64x2(), 0)) } @@ -2709,7 +2834,8 @@ pub unsafe fn _mm_storeu_si64(mem_addr: *mut u8, a: __m128i) { #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] -pub unsafe fn _mm_store1_pd(mem_addr: *mut f64, a: __m128d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_store1_pd(mem_addr: *mut f64, a: __m128d) { let b: __m128d = simd_shuffle!(a, a, [0, 0]); *(mem_addr as *mut __m128d) = b; } @@ -2723,7 +2849,8 @@ pub unsafe fn _mm_store1_pd(mem_addr: *mut f64, a: __m128d) { #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] -pub unsafe fn _mm_store_pd1(mem_addr: *mut f64, a: __m128d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_store_pd1(mem_addr: *mut f64, a: __m128d) { let b: __m128d = simd_shuffle!(a, a, [0, 0]); *(mem_addr as *mut __m128d) = b; } @@ -2738,7 +2865,8 @@ pub unsafe fn _mm_store_pd1(mem_addr: *mut f64, a: __m128d) { #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] -pub unsafe fn _mm_storer_pd(mem_addr: *mut f64, a: __m128d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_storer_pd(mem_addr: *mut f64, a: __m128d) { let b: __m128d = simd_shuffle!(a, a, [1, 0]); *(mem_addr as *mut __m128d) = b; } @@ -2751,7 +2879,8 @@ pub unsafe fn _mm_storer_pd(mem_addr: *mut f64, a: __m128d) { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(movhps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_storeh_pd(mem_addr: *mut f64, a: __m128d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_storeh_pd(mem_addr: *mut f64, a: __m128d) { *mem_addr = simd_extract!(a, 1); } @@ -2763,7 +2892,8 @@ pub unsafe fn _mm_storeh_pd(mem_addr: *mut f64, a: __m128d) { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(movlps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_storel_pd(mem_addr: *mut f64, a: __m128d) { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_storel_pd(mem_addr: *mut f64, a: __m128d) { *mem_addr = simd_extract!(a, 0); } @@ -2775,7 
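// _mm_storeu_si16/si32/si64 above extract lane 0 at the corresponding width
// and write it unaligned, so only 2, 4 or 8 bytes of the destination are
// touched. Sketch for the 32-bit case (helper name is illustrative):
unsafe fn store_low_i32(dst: *mut u8, lane0: i32) {
    unsafe { core::ptr::write_unaligned(dst as *mut i32, lane0) }
}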
+2905,8 @@ pub unsafe fn _mm_storel_pd(mem_addr: *mut f64, a: __m128d) { #[target_feature(enable = "sse2")] // #[cfg_attr(test, assert_instr(movapd))] // FIXME LLVM uses different codegen #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_load1_pd(mem_addr: *const f64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_load1_pd(mem_addr: *const f64) -> __m128d { let d = *mem_addr; _mm_setr_pd(d, d) } @@ -2788,7 +2919,8 @@ pub unsafe fn _mm_load1_pd(mem_addr: *const f64) -> __m128d { #[target_feature(enable = "sse2")] // #[cfg_attr(test, assert_instr(movapd))] // FIXME same as _mm_load1_pd #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_load_pd1(mem_addr: *const f64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_load_pd1(mem_addr: *const f64) -> __m128d { _mm_load1_pd(mem_addr) } @@ -2804,7 +2936,8 @@ pub unsafe fn _mm_load_pd1(mem_addr: *const f64) -> __m128d { assert_instr(movaps) )] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_loadr_pd(mem_addr: *const f64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_loadr_pd(mem_addr: *const f64) -> __m128d { let a = _mm_load_pd(mem_addr); simd_shuffle!(a, a, [1, 0]) } @@ -2818,7 +2951,8 @@ pub unsafe fn _mm_loadr_pd(mem_addr: *const f64) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(movups))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_loadu_pd(mem_addr: *const f64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_loadu_pd(mem_addr: *const f64) -> __m128d { let mut dst = _mm_undefined_pd(); ptr::copy_nonoverlapping( mem_addr as *const u8, @@ -2836,7 +2970,8 @@ pub unsafe fn _mm_loadu_pd(mem_addr: *const f64) -> __m128d { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86_updates", since = "1.82.0")] -pub unsafe fn _mm_loadu_si16(mem_addr: *const u8) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_loadu_si16(mem_addr: *const u8) -> __m128i { transmute(i16x8::new( ptr::read_unaligned(mem_addr as *const i16), 0, @@ -2857,7 +2992,8 @@ pub unsafe fn _mm_loadu_si16(mem_addr: *const u8) -> __m128i { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86_updates", since = "1.82.0")] -pub unsafe fn _mm_loadu_si32(mem_addr: *const u8) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_loadu_si32(mem_addr: *const u8) -> __m128i { transmute(i32x4::new( ptr::read_unaligned(mem_addr as *const i32), 0, @@ -2874,7 +3010,8 @@ pub unsafe fn _mm_loadu_si32(mem_addr: *const u8) -> __m128i { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86_mm_loadu_si64", since = "1.46.0")] -pub unsafe fn _mm_loadu_si64(mem_addr: *const u8) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_loadu_si64(mem_addr: *const u8) -> __m128i { transmute(i64x2::new(ptr::read_unaligned(mem_addr as *const i64), 0)) } @@ -2888,7 +3025,8 @@ pub unsafe fn _mm_loadu_si64(mem_addr: *const u8) -> __m128i { #[cfg_attr(test, assert_instr(shufps, MASK = 2))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_shuffle_pd(a: __m128d, b: __m128d) -> 
__m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_shuffle_pd(a: __m128d, b: __m128d) -> __m128d { static_assert_uimm_bits!(MASK, 8); unsafe { simd_shuffle!(a, b, [MASK as u32 & 0b1, ((MASK as u32 >> 1) & 0b1) + 2]) } } @@ -2902,7 +3040,8 @@ pub fn _mm_shuffle_pd(a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(movsd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_move_sd(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_move_sd(a: __m128d, b: __m128d) -> __m128d { unsafe { _mm_setr_pd(simd_extract!(b, 0), simd_extract!(a, 1)) } } @@ -2913,7 +3052,8 @@ pub fn _mm_move_sd(a: __m128d, b: __m128d) -> __m128d { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_castpd_ps(a: __m128d) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_castpd_ps(a: __m128d) -> __m128 { unsafe { transmute(a) } } @@ -2924,7 +3064,8 @@ pub fn _mm_castpd_ps(a: __m128d) -> __m128 { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_castpd_si128(a: __m128d) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_castpd_si128(a: __m128d) -> __m128i { unsafe { transmute(a) } } @@ -2935,7 +3076,8 @@ pub fn _mm_castpd_si128(a: __m128d) -> __m128i { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_castps_pd(a: __m128) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_castps_pd(a: __m128) -> __m128d { unsafe { transmute(a) } } @@ -2946,7 +3088,8 @@ pub fn _mm_castps_pd(a: __m128) -> __m128d { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_castps_si128(a: __m128) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_castps_si128(a: __m128) -> __m128i { unsafe { transmute(a) } } @@ -2957,7 +3100,8 @@ pub fn _mm_castps_si128(a: __m128) -> __m128i { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_castsi128_pd(a: __m128i) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_castsi128_pd(a: __m128i) -> __m128d { unsafe { transmute(a) } } @@ -2968,7 +3112,8 @@ pub fn _mm_castsi128_pd(a: __m128i) -> __m128d { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_castsi128_ps(a: __m128i) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_castsi128_ps(a: __m128i) -> __m128 { unsafe { transmute(a) } } @@ -2981,7 +3126,8 @@ pub fn _mm_castsi128_ps(a: __m128i) -> __m128 { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_undefined_pd() -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_undefined_pd() -> __m128d { const { unsafe { mem::zeroed() } } } @@ -2994,7 +3140,8 @@ pub fn _mm_undefined_pd() -> __m128d { #[inline] #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_undefined_si128() -> __m128i { +#[rustc_const_unstable(feature = 
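// _mm_shuffle_pd above decodes MASK bit 0 as the lane picked from `a` and MASK
// bit 1 as the lane picked from `b` (hence the `+ 2` offset into the
// concatenated [a, b]). Scalar sketch (helper name is illustrative):
fn shuffle_pd_lanes(a: [f64; 2], b: [f64; 2], mask: u32) -> [f64; 2] {
    [a[(mask & 1) as usize], b[((mask >> 1) & 1) as usize]]
}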
"stdarch_const_x86", issue = "149298")] +pub const fn _mm_undefined_si128() -> __m128i { const { unsafe { mem::zeroed() } } } @@ -3009,7 +3156,8 @@ pub fn _mm_undefined_si128() -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(unpckhpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_unpackhi_pd(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_unpackhi_pd(a: __m128d, b: __m128d) -> __m128d { unsafe { simd_shuffle!(a, b, [1, 3]) } } @@ -3024,7 +3172,8 @@ pub fn _mm_unpackhi_pd(a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(movlhps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_unpacklo_pd(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_unpacklo_pd(a: __m128d, b: __m128d) -> __m128d { unsafe { simd_shuffle!(a, b, [0, 2]) } } @@ -3118,6 +3267,7 @@ unsafe extern "C" { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use crate::{ core_arch::{simd::*, x86::*}, hint::black_box, @@ -3157,7 +3307,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_add_epi8() { + const unsafe fn test_mm_add_epi8() { let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); #[rustfmt::skip] let b = _mm_setr_epi8( @@ -3180,7 +3330,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_add_epi16() { + const unsafe fn test_mm_add_epi16() { let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm_setr_epi16(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm_add_epi16(a, b); @@ -3189,7 +3339,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_add_epi32() { + const unsafe fn test_mm_add_epi32() { let a = _mm_setr_epi32(0, 1, 2, 3); let b = _mm_setr_epi32(4, 5, 6, 7); let r = _mm_add_epi32(a, b); @@ -3198,7 +3348,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_add_epi64() { + const unsafe fn test_mm_add_epi64() { let a = _mm_setr_epi64x(0, 1); let b = _mm_setr_epi64x(2, 3); let r = _mm_add_epi64(a, b); @@ -3207,7 +3357,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_adds_epi8() { + const unsafe fn test_mm_adds_epi8() { let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); #[rustfmt::skip] let b = _mm_setr_epi8( @@ -3238,7 +3388,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_adds_epi16() { + const unsafe fn test_mm_adds_epi16() { let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm_setr_epi16(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm_adds_epi16(a, b); @@ -3263,7 +3413,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_adds_epu8() { + const unsafe fn test_mm_adds_epu8() { let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); #[rustfmt::skip] let b = _mm_setr_epi8( @@ -3286,7 +3436,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_adds_epu16() { + const unsafe fn test_mm_adds_epu16() { let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm_setr_epi16(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm_adds_epu16(a, b); @@ -3303,21 +3453,21 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_avg_epu8() { + const unsafe fn test_mm_avg_epu8() { let (a, b) = (_mm_set1_epi8(3), _mm_set1_epi8(9)); let r = _mm_avg_epu8(a, b); assert_eq_m128i(r, _mm_set1_epi8(6)); } #[simd_test(enable = "sse2")] - unsafe fn 
test_mm_avg_epu16() { + const unsafe fn test_mm_avg_epu16() { let (a, b) = (_mm_set1_epi16(3), _mm_set1_epi16(9)); let r = _mm_avg_epu16(a, b); assert_eq_m128i(r, _mm_set1_epi16(6)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_madd_epi16() { + const unsafe fn test_mm_madd_epi16() { let a = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm_setr_epi16(9, 10, 11, 12, 13, 14, 15, 16); let r = _mm_madd_epi16(a, b); @@ -3352,7 +3502,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_max_epi16() { + const unsafe fn test_mm_max_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(-1); let r = _mm_max_epi16(a, b); @@ -3360,7 +3510,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_max_epu8() { + const unsafe fn test_mm_max_epu8() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(!0); let r = _mm_max_epu8(a, b); @@ -3368,7 +3518,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_min_epi16() { + const unsafe fn test_mm_min_epi16() { let a = _mm_set1_epi16(1); let b = _mm_set1_epi16(-1); let r = _mm_min_epi16(a, b); @@ -3376,7 +3526,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_min_epu8() { + const unsafe fn test_mm_min_epu8() { let a = _mm_set1_epi8(1); let b = _mm_set1_epi8(!0); let r = _mm_min_epu8(a, b); @@ -3384,28 +3534,28 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_mulhi_epi16() { + const unsafe fn test_mm_mulhi_epi16() { let (a, b) = (_mm_set1_epi16(1000), _mm_set1_epi16(-1001)); let r = _mm_mulhi_epi16(a, b); assert_eq_m128i(r, _mm_set1_epi16(-16)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_mulhi_epu16() { + const unsafe fn test_mm_mulhi_epu16() { let (a, b) = (_mm_set1_epi16(1000), _mm_set1_epi16(1001)); let r = _mm_mulhi_epu16(a, b); assert_eq_m128i(r, _mm_set1_epi16(15)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_mullo_epi16() { + const unsafe fn test_mm_mullo_epi16() { let (a, b) = (_mm_set1_epi16(1000), _mm_set1_epi16(-1001)); let r = _mm_mullo_epi16(a, b); assert_eq_m128i(r, _mm_set1_epi16(-17960)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_mul_epu32() { + const unsafe fn test_mm_mul_epu32() { let a = _mm_setr_epi64x(1_000_000_000, 1 << 34); let b = _mm_setr_epi64x(1_000_000_000, 1 << 35); let r = _mm_mul_epu32(a, b); @@ -3429,35 +3579,35 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_sub_epi8() { + const unsafe fn test_mm_sub_epi8() { let (a, b) = (_mm_set1_epi8(5), _mm_set1_epi8(6)); let r = _mm_sub_epi8(a, b); assert_eq_m128i(r, _mm_set1_epi8(-1)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_sub_epi16() { + const unsafe fn test_mm_sub_epi16() { let (a, b) = (_mm_set1_epi16(5), _mm_set1_epi16(6)); let r = _mm_sub_epi16(a, b); assert_eq_m128i(r, _mm_set1_epi16(-1)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_sub_epi32() { + const unsafe fn test_mm_sub_epi32() { let (a, b) = (_mm_set1_epi32(5), _mm_set1_epi32(6)); let r = _mm_sub_epi32(a, b); assert_eq_m128i(r, _mm_set1_epi32(-1)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_sub_epi64() { + const unsafe fn test_mm_sub_epi64() { let (a, b) = (_mm_set1_epi64x(5), _mm_set1_epi64x(6)); let r = _mm_sub_epi64(a, b); assert_eq_m128i(r, _mm_set1_epi64x(-1)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_subs_epi8() { + const unsafe fn test_mm_subs_epi8() { let (a, b) = (_mm_set1_epi8(5), _mm_set1_epi8(2)); let r = _mm_subs_epi8(a, b); assert_eq_m128i(r, _mm_set1_epi8(3)); @@ -3480,7 +3630,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn 
test_mm_subs_epi16() { + const unsafe fn test_mm_subs_epi16() { let (a, b) = (_mm_set1_epi16(5), _mm_set1_epi16(2)); let r = _mm_subs_epi16(a, b); assert_eq_m128i(r, _mm_set1_epi16(3)); @@ -3503,7 +3653,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_subs_epu8() { + const unsafe fn test_mm_subs_epu8() { let (a, b) = (_mm_set1_epi8(5), _mm_set1_epi8(2)); let r = _mm_subs_epu8(a, b); assert_eq_m128i(r, _mm_set1_epi8(3)); @@ -3518,7 +3668,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_subs_epu16() { + const unsafe fn test_mm_subs_epu16() { let (a, b) = (_mm_set1_epi16(5), _mm_set1_epi16(2)); let r = _mm_subs_epu16(a, b); assert_eq_m128i(r, _mm_set1_epi16(3)); @@ -3533,7 +3683,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_slli_si128() { + const unsafe fn test_mm_slli_si128() { #[rustfmt::skip] let a = _mm_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, @@ -3559,7 +3709,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_slli_epi16() { + const unsafe fn test_mm_slli_epi16() { let a = _mm_setr_epi16(0xCC, -0xCC, 0xDD, -0xDD, 0xEE, -0xEE, 0xFF, -0xFF); let r = _mm_slli_epi16::<4>(a); assert_eq_m128i( @@ -3587,7 +3737,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_slli_epi32() { + const unsafe fn test_mm_slli_epi32() { let a = _mm_setr_epi32(0xEEEE, -0xEEEE, 0xFFFF, -0xFFFF); let r = _mm_slli_epi32::<4>(a); assert_eq_m128i(r, _mm_setr_epi32(0xEEEE0, -0xEEEE0, 0xFFFF0, -0xFFFF0)); @@ -3609,7 +3759,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_slli_epi64() { + const unsafe fn test_mm_slli_epi64() { let a = _mm_set_epi64x(0xFFFFFFFF, -0xFFFFFFFF); let r = _mm_slli_epi64::<4>(a); assert_eq_m128i(r, _mm_set_epi64x(0xFFFFFFFF0, -0xFFFFFFFF0)); @@ -3631,7 +3781,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_srai_epi16() { + const unsafe fn test_mm_srai_epi16() { let a = _mm_setr_epi16(0xCC, -0xCC, 0xDD, -0xDD, 0xEE, -0xEE, 0xFF, -0xFF); let r = _mm_srai_epi16::<4>(a); assert_eq_m128i( @@ -3659,7 +3809,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_srai_epi32() { + const unsafe fn test_mm_srai_epi32() { let a = _mm_setr_epi32(0xEEEE, -0xEEEE, 0xFFFF, -0xFFFF); let r = _mm_srai_epi32::<4>(a); assert_eq_m128i(r, _mm_setr_epi32(0xEEE, -0xEEF, 0xFFF, -0x1000)); @@ -3681,7 +3831,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_srli_si128() { + const unsafe fn test_mm_srli_si128() { #[rustfmt::skip] let a = _mm_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, @@ -3710,7 +3860,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_srli_epi16() { + const unsafe fn test_mm_srli_epi16() { let a = _mm_setr_epi16(0xCC, -0xCC, 0xDD, -0xDD, 0xEE, -0xEE, 0xFF, -0xFF); let r = _mm_srli_epi16::<4>(a); assert_eq_m128i( @@ -3738,7 +3888,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_srli_epi32() { + const unsafe fn test_mm_srli_epi32() { let a = _mm_setr_epi32(0xEEEE, -0xEEEE, 0xFFFF, -0xFFFF); let r = _mm_srli_epi32::<4>(a); assert_eq_m128i(r, _mm_setr_epi32(0xEEE, 0xFFFF111, 0xFFF, 0xFFFF000)); @@ -3760,7 +3910,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_srli_epi64() { + const unsafe fn test_mm_srli_epi64() { let a = _mm_set_epi64x(0xFFFFFFFF, -0xFFFFFFFF); let r = _mm_srli_epi64::<4>(a); assert_eq_m128i(r, _mm_set_epi64x(0xFFFFFFF, 0xFFFFFFFF0000000)); @@ -3782,7 +3932,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn 
test_mm_and_si128() { + const unsafe fn test_mm_and_si128() { let a = _mm_set1_epi8(5); let b = _mm_set1_epi8(3); let r = _mm_and_si128(a, b); @@ -3790,7 +3940,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_andnot_si128() { + const unsafe fn test_mm_andnot_si128() { let a = _mm_set1_epi8(5); let b = _mm_set1_epi8(3); let r = _mm_andnot_si128(a, b); @@ -3798,7 +3948,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_or_si128() { + const unsafe fn test_mm_or_si128() { let a = _mm_set1_epi8(5); let b = _mm_set1_epi8(3); let r = _mm_or_si128(a, b); @@ -3806,7 +3956,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_xor_si128() { + const unsafe fn test_mm_xor_si128() { let a = _mm_set1_epi8(5); let b = _mm_set1_epi8(3); let r = _mm_xor_si128(a, b); @@ -3814,7 +3964,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cmpeq_epi8() { + const unsafe fn test_mm_cmpeq_epi8() { let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let b = _mm_setr_epi8(15, 14, 2, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); let r = _mm_cmpeq_epi8(a, b); @@ -3828,7 +3978,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cmpeq_epi16() { + const unsafe fn test_mm_cmpeq_epi16() { let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm_setr_epi16(7, 6, 2, 4, 3, 2, 1, 0); let r = _mm_cmpeq_epi16(a, b); @@ -3836,7 +3986,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cmpeq_epi32() { + const unsafe fn test_mm_cmpeq_epi32() { let a = _mm_setr_epi32(0, 1, 2, 3); let b = _mm_setr_epi32(3, 2, 2, 0); let r = _mm_cmpeq_epi32(a, b); @@ -3844,7 +3994,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cmpgt_epi8() { + const unsafe fn test_mm_cmpgt_epi8() { let a = _mm_set_epi8(5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); let b = _mm_set1_epi8(0); let r = _mm_cmpgt_epi8(a, b); @@ -3853,7 +4003,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cmpgt_epi16() { + const unsafe fn test_mm_cmpgt_epi16() { let a = _mm_set_epi16(5, 0, 0, 0, 0, 0, 0, 0); let b = _mm_set1_epi16(0); let r = _mm_cmpgt_epi16(a, b); @@ -3862,7 +4012,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cmpgt_epi32() { + const unsafe fn test_mm_cmpgt_epi32() { let a = _mm_set_epi32(5, 0, 0, 0); let b = _mm_set1_epi32(0); let r = _mm_cmpgt_epi32(a, b); @@ -3870,7 +4020,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cmplt_epi8() { + const unsafe fn test_mm_cmplt_epi8() { let a = _mm_set1_epi8(0); let b = _mm_set_epi8(5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); let r = _mm_cmplt_epi8(a, b); @@ -3879,7 +4029,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cmplt_epi16() { + const unsafe fn test_mm_cmplt_epi16() { let a = _mm_set1_epi16(0); let b = _mm_set_epi16(5, 0, 0, 0, 0, 0, 0, 0); let r = _mm_cmplt_epi16(a, b); @@ -3888,7 +4038,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cmplt_epi32() { + const unsafe fn test_mm_cmplt_epi32() { let a = _mm_set1_epi32(0); let b = _mm_set_epi32(5, 0, 0, 0); let r = _mm_cmplt_epi32(a, b); @@ -3896,21 +4046,21 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cvtepi32_pd() { + const unsafe fn test_mm_cvtepi32_pd() { let a = _mm_set_epi32(35, 25, 15, 5); let r = _mm_cvtepi32_pd(a); assert_eq_m128d(r, _mm_setr_pd(5.0, 15.0)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cvtsi32_sd() { + const unsafe fn test_mm_cvtsi32_sd() { let a = _mm_set1_pd(3.5); 
let r = _mm_cvtsi32_sd(a, 5); assert_eq_m128d(r, _mm_setr_pd(5.0, 3.5)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cvtepi32_ps() { + const unsafe fn test_mm_cvtepi32_ps() { let a = _mm_setr_epi32(1, 2, 3, 4); let r = _mm_cvtepi32_ps(a); assert_eq_m128(r, _mm_setr_ps(1.0, 2.0, 3.0, 4.0)); @@ -3924,37 +4074,37 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cvtsi32_si128() { + const unsafe fn test_mm_cvtsi32_si128() { let r = _mm_cvtsi32_si128(5); assert_eq_m128i(r, _mm_setr_epi32(5, 0, 0, 0)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cvtsi128_si32() { + const unsafe fn test_mm_cvtsi128_si32() { let r = _mm_cvtsi128_si32(_mm_setr_epi32(5, 0, 0, 0)); assert_eq!(r, 5); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_set_epi64x() { + const unsafe fn test_mm_set_epi64x() { let r = _mm_set_epi64x(0, 1); assert_eq_m128i(r, _mm_setr_epi64x(1, 0)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_set_epi32() { + const unsafe fn test_mm_set_epi32() { let r = _mm_set_epi32(0, 1, 2, 3); assert_eq_m128i(r, _mm_setr_epi32(3, 2, 1, 0)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_set_epi16() { + const unsafe fn test_mm_set_epi16() { let r = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7); assert_eq_m128i(r, _mm_setr_epi16(7, 6, 5, 4, 3, 2, 1, 0)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_set_epi8() { + const unsafe fn test_mm_set_epi8() { #[rustfmt::skip] let r = _mm_set_epi8( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -3968,43 +4118,43 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_set1_epi64x() { + const unsafe fn test_mm_set1_epi64x() { let r = _mm_set1_epi64x(1); assert_eq_m128i(r, _mm_set1_epi64x(1)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_set1_epi32() { + const unsafe fn test_mm_set1_epi32() { let r = _mm_set1_epi32(1); assert_eq_m128i(r, _mm_set1_epi32(1)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_set1_epi16() { + const unsafe fn test_mm_set1_epi16() { let r = _mm_set1_epi16(1); assert_eq_m128i(r, _mm_set1_epi16(1)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_set1_epi8() { + const unsafe fn test_mm_set1_epi8() { let r = _mm_set1_epi8(1); assert_eq_m128i(r, _mm_set1_epi8(1)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_setr_epi32() { + const unsafe fn test_mm_setr_epi32() { let r = _mm_setr_epi32(0, 1, 2, 3); assert_eq_m128i(r, _mm_setr_epi32(0, 1, 2, 3)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_setr_epi16() { + const unsafe fn test_mm_setr_epi16() { let r = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7); assert_eq_m128i(r, _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_setr_epi8() { + const unsafe fn test_mm_setr_epi8() { #[rustfmt::skip] let r = _mm_setr_epi8( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -4018,27 +4168,27 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_setzero_si128() { + const unsafe fn test_mm_setzero_si128() { let r = _mm_setzero_si128(); assert_eq_m128i(r, _mm_set1_epi64x(0)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_loadl_epi64() { + const unsafe fn test_mm_loadl_epi64() { let a = _mm_setr_epi64x(6, 5); let r = _mm_loadl_epi64(ptr::addr_of!(a)); assert_eq_m128i(r, _mm_setr_epi64x(6, 0)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_load_si128() { + const unsafe fn test_mm_load_si128() { let a = _mm_set_epi64x(5, 6); let r = _mm_load_si128(ptr::addr_of!(a) as *const _); assert_eq_m128i(a, r); } #[simd_test(enable = "sse2")] - unsafe fn 
test_mm_loadu_si128() { + const unsafe fn test_mm_loadu_si128() { let a = _mm_set_epi64x(5, 6); let r = _mm_loadu_si128(ptr::addr_of!(a) as *const _); assert_eq_m128i(a, r); @@ -4063,7 +4213,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_store_si128() { + const unsafe fn test_mm_store_si128() { let a = _mm_set1_epi8(9); let mut r = _mm_set1_epi8(0); _mm_store_si128(&mut r, a); @@ -4071,7 +4221,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_storeu_si128() { + const unsafe fn test_mm_storeu_si128() { let a = _mm_set1_epi8(9); let mut r = _mm_set1_epi8(0); _mm_storeu_si128(&mut r, a); @@ -4079,7 +4229,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_storel_epi64() { + const unsafe fn test_mm_storel_epi64() { let a = _mm_setr_epi64x(2, 9); let mut r = _mm_set1_epi8(0); _mm_storel_epi64(&mut r, a); @@ -4111,7 +4261,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_move_epi64() { + const unsafe fn test_mm_move_epi64() { let a = _mm_setr_epi64x(5, 6); let r = _mm_move_epi64(a); assert_eq_m128i(r, _mm_setr_epi64x(5, 0)); @@ -4154,7 +4304,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_extract_epi16() { + const unsafe fn test_mm_extract_epi16() { let a = _mm_setr_epi16(-1, 1, 2, 3, 4, 5, 6, 7); let r1 = _mm_extract_epi16::<0>(a); let r2 = _mm_extract_epi16::<3>(a); @@ -4163,7 +4313,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_insert_epi16() { + const unsafe fn test_mm_insert_epi16() { let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7); let r = _mm_insert_epi16::<0>(a, 9); let e = _mm_setr_epi16(9, 1, 2, 3, 4, 5, 6, 7); @@ -4171,7 +4321,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_movemask_epi8() { + const unsafe fn test_mm_movemask_epi8() { #[rustfmt::skip] let a = _mm_setr_epi8( 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, 0b01, @@ -4184,7 +4334,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_shuffle_epi32() { + const unsafe fn test_mm_shuffle_epi32() { let a = _mm_setr_epi32(5, 10, 15, 20); let r = _mm_shuffle_epi32::<0b00_01_01_11>(a); let e = _mm_setr_epi32(20, 10, 10, 5); @@ -4192,7 +4342,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_shufflehi_epi16() { + const unsafe fn test_mm_shufflehi_epi16() { let a = _mm_setr_epi16(1, 2, 3, 4, 5, 10, 15, 20); let r = _mm_shufflehi_epi16::<0b00_01_01_11>(a); let e = _mm_setr_epi16(1, 2, 3, 4, 20, 10, 10, 5); @@ -4200,7 +4350,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_shufflelo_epi16() { + const unsafe fn test_mm_shufflelo_epi16() { let a = _mm_setr_epi16(5, 10, 15, 20, 1, 2, 3, 4); let r = _mm_shufflelo_epi16::<0b00_01_01_11>(a); let e = _mm_setr_epi16(20, 10, 10, 5, 1, 2, 3, 4); @@ -4208,7 +4358,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_unpackhi_epi8() { + const unsafe fn test_mm_unpackhi_epi8() { #[rustfmt::skip] let a = _mm_setr_epi8( 0, 1, 2, 3, 4, 5, 6, 7, @@ -4227,7 +4377,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_unpackhi_epi16() { + const unsafe fn test_mm_unpackhi_epi16() { let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm_setr_epi16(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm_unpackhi_epi16(a, b); @@ -4236,7 +4386,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_unpackhi_epi32() { + const unsafe fn test_mm_unpackhi_epi32() { let a = _mm_setr_epi32(0, 1, 2, 3); let b = _mm_setr_epi32(4, 5, 6, 7); let r = _mm_unpackhi_epi32(a, b); @@ -4245,7 +4395,7 
@@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_unpackhi_epi64() { + const unsafe fn test_mm_unpackhi_epi64() { let a = _mm_setr_epi64x(0, 1); let b = _mm_setr_epi64x(2, 3); let r = _mm_unpackhi_epi64(a, b); @@ -4254,7 +4404,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_unpacklo_epi8() { + const unsafe fn test_mm_unpacklo_epi8() { #[rustfmt::skip] let a = _mm_setr_epi8( 0, 1, 2, 3, 4, 5, 6, 7, @@ -4274,7 +4424,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_unpacklo_epi16() { + const unsafe fn test_mm_unpacklo_epi16() { let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm_setr_epi16(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm_unpacklo_epi16(a, b); @@ -4283,7 +4433,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_unpacklo_epi32() { + const unsafe fn test_mm_unpacklo_epi32() { let a = _mm_setr_epi32(0, 1, 2, 3); let b = _mm_setr_epi32(4, 5, 6, 7); let r = _mm_unpacklo_epi32(a, b); @@ -4292,7 +4442,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_unpacklo_epi64() { + const unsafe fn test_mm_unpacklo_epi64() { let a = _mm_setr_epi64x(0, 1); let b = _mm_setr_epi64x(2, 3); let r = _mm_unpacklo_epi64(a, b); @@ -4301,7 +4451,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_add_sd() { + const unsafe fn test_mm_add_sd() { let a = _mm_setr_pd(1.0, 2.0); let b = _mm_setr_pd(5.0, 10.0); let r = _mm_add_sd(a, b); @@ -4309,7 +4459,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_add_pd() { + const unsafe fn test_mm_add_pd() { let a = _mm_setr_pd(1.0, 2.0); let b = _mm_setr_pd(5.0, 10.0); let r = _mm_add_pd(a, b); @@ -4317,7 +4467,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_div_sd() { + const unsafe fn test_mm_div_sd() { let a = _mm_setr_pd(1.0, 2.0); let b = _mm_setr_pd(5.0, 10.0); let r = _mm_div_sd(a, b); @@ -4325,7 +4475,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_div_pd() { + const unsafe fn test_mm_div_pd() { let a = _mm_setr_pd(1.0, 2.0); let b = _mm_setr_pd(5.0, 10.0); let r = _mm_div_pd(a, b); @@ -4387,7 +4537,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_mul_sd() { + const unsafe fn test_mm_mul_sd() { let a = _mm_setr_pd(1.0, 2.0); let b = _mm_setr_pd(5.0, 10.0); let r = _mm_mul_sd(a, b); @@ -4395,7 +4545,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_mul_pd() { + const unsafe fn test_mm_mul_pd() { let a = _mm_setr_pd(1.0, 2.0); let b = _mm_setr_pd(5.0, 10.0); let r = _mm_mul_pd(a, b); @@ -4417,7 +4567,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_sub_sd() { + const unsafe fn test_mm_sub_sd() { let a = _mm_setr_pd(1.0, 2.0); let b = _mm_setr_pd(5.0, 10.0); let r = _mm_sub_sd(a, b); @@ -4425,7 +4575,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_sub_pd() { + const unsafe fn test_mm_sub_pd() { let a = _mm_setr_pd(1.0, 2.0); let b = _mm_setr_pd(5.0, 10.0); let r = _mm_sub_pd(a, b); @@ -4433,7 +4583,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_and_pd() { + const unsafe fn test_mm_and_pd() { let a = transmute(u64x2::splat(5)); let b = transmute(u64x2::splat(3)); let r = _mm_and_pd(a, b); @@ -4442,7 +4592,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_andnot_pd() { + const unsafe fn test_mm_andnot_pd() { let a = transmute(u64x2::splat(5)); let b = transmute(u64x2::splat(3)); let r = _mm_andnot_pd(a, b); @@ -4451,7 +4601,7 @@ mod tests { } #[simd_test(enable = "sse2")] - 
unsafe fn test_mm_or_pd() { + const unsafe fn test_mm_or_pd() { let a = transmute(u64x2::splat(5)); let b = transmute(u64x2::splat(3)); let r = _mm_or_pd(a, b); @@ -4460,7 +4610,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_xor_pd() { + const unsafe fn test_mm_xor_pd() { let a = transmute(u64x2::splat(5)); let b = transmute(u64x2::splat(3)); let r = _mm_xor_pd(a, b); @@ -4739,7 +4889,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_movemask_pd() { + const unsafe fn test_mm_movemask_pd() { let r = _mm_movemask_pd(_mm_setr_pd(-1.0, 5.0)); assert_eq!(r, 0b01); @@ -4753,7 +4903,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_load_pd() { + const unsafe fn test_mm_load_pd() { let mem = Memory { data: [1.0f64, 2.0, 3.0, 4.0], }; @@ -4765,7 +4915,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_load_sd() { + const unsafe fn test_mm_load_sd() { let a = 1.; let expected = _mm_setr_pd(a, 0.); let r = _mm_load_sd(&a); @@ -4773,7 +4923,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_loadh_pd() { + const unsafe fn test_mm_loadh_pd() { let a = _mm_setr_pd(1., 2.); let b = 3.; let expected = _mm_setr_pd(_mm_cvtsd_f64(a), 3.); @@ -4782,7 +4932,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_loadl_pd() { + const unsafe fn test_mm_loadl_pd() { let a = _mm_setr_pd(1., 2.); let b = 3.; let expected = _mm_setr_pd(3., get_m128d(a, 1)); @@ -4810,7 +4960,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_store_sd() { + const unsafe fn test_mm_store_sd() { let mut dest = 0.; let a = _mm_setr_pd(1., 2.); _mm_store_sd(&mut dest, a); @@ -4818,7 +4968,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_store_pd() { + const unsafe fn test_mm_store_pd() { let mut mem = Memory { data: [0.0f64; 4] }; let vals = &mut mem.data; let a = _mm_setr_pd(1.0, 2.0); @@ -4830,31 +4980,21 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_storeu_pd() { + const unsafe fn test_mm_storeu_pd() { + // guaranteed to be aligned to 16 bytes let mut mem = Memory { data: [0.0f64; 4] }; let vals = &mut mem.data; let a = _mm_setr_pd(1.0, 2.0); - let mut ofs = 0; - let mut p = vals.as_mut_ptr(); - - // Make sure p is **not** aligned to 16-byte boundary - if (p as usize) & 0xf == 0 { - ofs = 1; - p = p.add(1); - } - + // so p is *not* aligned to 16 bytes + let p = vals.as_mut_ptr().offset(1); _mm_storeu_pd(p, *black_box(&a)); - if ofs > 0 { - assert_eq!(vals[ofs - 1], 0.0); - } - assert_eq!(vals[ofs + 0], 1.0); - assert_eq!(vals[ofs + 1], 2.0); + assert_eq!(*vals, [0.0, 1.0, 2.0, 0.0]); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_storeu_si16() { + const unsafe fn test_mm_storeu_si16() { let a = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8); let mut r = _mm_setr_epi16(9, 10, 11, 12, 13, 14, 15, 16); _mm_storeu_si16(ptr::addr_of_mut!(r).cast(), a); @@ -4863,7 +5003,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_storeu_si32() { + const unsafe fn test_mm_storeu_si32() { let a = _mm_setr_epi32(1, 2, 3, 4); let mut r = _mm_setr_epi32(5, 6, 7, 8); _mm_storeu_si32(ptr::addr_of_mut!(r).cast(), a); @@ -4872,7 +5012,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_storeu_si64() { + const unsafe fn test_mm_storeu_si64() { let a = _mm_setr_epi64x(1, 2); let mut r = _mm_setr_epi64x(3, 4); _mm_storeu_si64(ptr::addr_of_mut!(r).cast(), a); @@ -4881,7 +5021,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_store1_pd() { + const 
unsafe fn test_mm_store1_pd() { let mut mem = Memory { data: [0.0f64; 4] }; let vals = &mut mem.data; let a = _mm_setr_pd(1.0, 2.0); @@ -4893,7 +5033,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_store_pd1() { + const unsafe fn test_mm_store_pd1() { let mut mem = Memory { data: [0.0f64; 4] }; let vals = &mut mem.data; let a = _mm_setr_pd(1.0, 2.0); @@ -4905,7 +5045,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_storer_pd() { + const unsafe fn test_mm_storer_pd() { let mut mem = Memory { data: [0.0f64; 4] }; let vals = &mut mem.data; let a = _mm_setr_pd(1.0, 2.0); @@ -4917,7 +5057,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_storeh_pd() { + const unsafe fn test_mm_storeh_pd() { let mut dest = 0.; let a = _mm_setr_pd(1., 2.); _mm_storeh_pd(&mut dest, a); @@ -4925,7 +5065,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_storel_pd() { + const unsafe fn test_mm_storel_pd() { let mut dest = 0.; let a = _mm_setr_pd(1., 2.); _mm_storel_pd(&mut dest, a); @@ -4933,7 +5073,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_loadr_pd() { + const unsafe fn test_mm_loadr_pd() { let mut mem = Memory { data: [1.0f64, 2.0, 3.0, 4.0], }; @@ -4945,48 +5085,44 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_loadu_pd() { + const unsafe fn test_mm_loadu_pd() { + // guaranteed to be aligned to 16 bytes let mut mem = Memory { data: [1.0f64, 2.0, 3.0, 4.0], }; let vals = &mut mem.data; - let mut d = vals.as_ptr(); - // make sure d is not aligned to 16-byte boundary - let mut offset = 0; - if (d as usize) & 0xf == 0 { - offset = 1; - d = d.add(offset); - } + // so this will *not* be aligned to 16 bytes + let d = vals.as_ptr().offset(1); let r = _mm_loadu_pd(d); - let e = _mm_add_pd(_mm_setr_pd(1.0, 2.0), _mm_set1_pd(offset as f64)); + let e = _mm_setr_pd(2.0, 3.0); assert_eq_m128d(r, e); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_loadu_si16() { + const unsafe fn test_mm_loadu_si16() { let a = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm_loadu_si16(ptr::addr_of!(a) as *const _); assert_eq_m128i(r, _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_loadu_si32() { + const unsafe fn test_mm_loadu_si32() { let a = _mm_setr_epi32(1, 2, 3, 4); let r = _mm_loadu_si32(ptr::addr_of!(a) as *const _); assert_eq_m128i(r, _mm_setr_epi32(1, 0, 0, 0)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_loadu_si64() { + const unsafe fn test_mm_loadu_si64() { let a = _mm_setr_epi64x(5, 6); let r = _mm_loadu_si64(ptr::addr_of!(a) as *const _); assert_eq_m128i(r, _mm_setr_epi64x(5, 0)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cvtpd_ps() { + const unsafe fn test_mm_cvtpd_ps() { let r = _mm_cvtpd_ps(_mm_setr_pd(-1.0, 5.0)); assert_eq_m128(r, _mm_setr_ps(-1.0, 5.0, 0.0, 0.0)); @@ -5001,7 +5137,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cvtps_pd() { + const unsafe fn test_mm_cvtps_pd() { let r = _mm_cvtps_pd(_mm_setr_ps(-1.0, 2.0, -3.0, 5.0)); assert_eq_m128d(r, _mm_setr_pd(-1.0, 2.0)); @@ -5070,13 +5206,13 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cvtsd_f64() { + const unsafe fn test_mm_cvtsd_f64() { let r = _mm_cvtsd_f64(_mm_setr_pd(-1.1, 2.2)); assert_eq!(r, -1.1); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cvtss_sd() { + const unsafe fn test_mm_cvtss_sd() { let a = _mm_setr_pd(-1.1, 2.2); let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); @@ -5124,57 +5260,57 @@ mod tests { } 
#[simd_test(enable = "sse2")] - unsafe fn test_mm_set_sd() { + const unsafe fn test_mm_set_sd() { let r = _mm_set_sd(-1.0_f64); assert_eq_m128d(r, _mm_setr_pd(-1.0_f64, 0_f64)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_set1_pd() { + const unsafe fn test_mm_set1_pd() { let r = _mm_set1_pd(-1.0_f64); assert_eq_m128d(r, _mm_setr_pd(-1.0_f64, -1.0_f64)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_set_pd1() { + const unsafe fn test_mm_set_pd1() { let r = _mm_set_pd1(-2.0_f64); assert_eq_m128d(r, _mm_setr_pd(-2.0_f64, -2.0_f64)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_set_pd() { + const unsafe fn test_mm_set_pd() { let r = _mm_set_pd(1.0_f64, 5.0_f64); assert_eq_m128d(r, _mm_setr_pd(5.0_f64, 1.0_f64)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_setr_pd() { + const unsafe fn test_mm_setr_pd() { let r = _mm_setr_pd(1.0_f64, -5.0_f64); assert_eq_m128d(r, _mm_setr_pd(1.0_f64, -5.0_f64)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_setzero_pd() { + const unsafe fn test_mm_setzero_pd() { let r = _mm_setzero_pd(); assert_eq_m128d(r, _mm_setr_pd(0_f64, 0_f64)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_load1_pd() { + const unsafe fn test_mm_load1_pd() { let d = -5.0; let r = _mm_load1_pd(&d); assert_eq_m128d(r, _mm_setr_pd(d, d)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_load_pd1() { + const unsafe fn test_mm_load_pd1() { let d = -5.0; let r = _mm_load_pd1(&d); assert_eq_m128d(r, _mm_setr_pd(d, d)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_unpackhi_pd() { + const unsafe fn test_mm_unpackhi_pd() { let a = _mm_setr_pd(1.0, 2.0); let b = _mm_setr_pd(3.0, 4.0); let r = _mm_unpackhi_pd(a, b); @@ -5182,7 +5318,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_unpacklo_pd() { + const unsafe fn test_mm_unpacklo_pd() { let a = _mm_setr_pd(1.0, 2.0); let b = _mm_setr_pd(3.0, 4.0); let r = _mm_unpacklo_pd(a, b); @@ -5190,7 +5326,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_shuffle_pd() { + const unsafe fn test_mm_shuffle_pd() { let a = _mm_setr_pd(1., 2.); let b = _mm_setr_pd(3., 4.); let expected = _mm_setr_pd(1., 3.); @@ -5199,7 +5335,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_move_sd() { + const unsafe fn test_mm_move_sd() { let a = _mm_setr_pd(1., 2.); let b = _mm_setr_pd(3., 4.); let expected = _mm_setr_pd(3., 2.); @@ -5208,7 +5344,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_castpd_ps() { + const unsafe fn test_mm_castpd_ps() { let a = _mm_set1_pd(0.); let expected = _mm_set1_ps(0.); let r = _mm_castpd_ps(a); @@ -5216,7 +5352,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_castpd_si128() { + const unsafe fn test_mm_castpd_si128() { let a = _mm_set1_pd(0.); let expected = _mm_set1_epi64x(0); let r = _mm_castpd_si128(a); @@ -5224,7 +5360,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_castps_pd() { + const unsafe fn test_mm_castps_pd() { let a = _mm_set1_ps(0.); let expected = _mm_set1_pd(0.); let r = _mm_castps_pd(a); @@ -5232,7 +5368,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_castps_si128() { + const unsafe fn test_mm_castps_si128() { let a = _mm_set1_ps(0.); let expected = _mm_set1_epi32(0); let r = _mm_castps_si128(a); @@ -5240,7 +5376,7 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_castsi128_pd() { + const unsafe fn test_mm_castsi128_pd() { let a = _mm_set1_epi64x(0); let expected = _mm_set1_pd(0.); let r = _mm_castsi128_pd(a); @@ -5248,7 +5384,7 
@@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_castsi128_ps() { + const unsafe fn test_mm_castsi128_ps() { let a = _mm_set1_epi32(0); let expected = _mm_set1_ps(0.); let r = _mm_castsi128_ps(a); diff --git a/crates/core_arch/src/x86/sse3.rs b/crates/core_arch/src/x86/sse3.rs index 79be7a7e9b..eebca864f4 100644 --- a/crates/core_arch/src/x86/sse3.rs +++ b/crates/core_arch/src/x86/sse3.rs @@ -14,7 +14,8 @@ use stdarch_test::assert_instr; #[target_feature(enable = "sse3")] #[cfg_attr(test, assert_instr(addsubps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_addsub_ps(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_addsub_ps(a: __m128, b: __m128) -> __m128 { unsafe { let a = a.as_f32x4(); let b = b.as_f32x4(); @@ -32,7 +33,8 @@ pub fn _mm_addsub_ps(a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "sse3")] #[cfg_attr(test, assert_instr(addsubpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_addsub_pd(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_addsub_pd(a: __m128d, b: __m128d) -> __m128d { unsafe { let a = a.as_f64x2(); let b = b.as_f64x2(); @@ -50,7 +52,8 @@ pub fn _mm_addsub_pd(a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "sse3")] #[cfg_attr(test, assert_instr(haddpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_hadd_pd(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_hadd_pd(a: __m128d, b: __m128d) -> __m128d { unsafe { let even = simd_shuffle!(a, b, [0, 2]); let odd = simd_shuffle!(a, b, [1, 3]); @@ -66,7 +69,8 @@ pub fn _mm_hadd_pd(a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "sse3")] #[cfg_attr(test, assert_instr(haddps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_hadd_ps(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_hadd_ps(a: __m128, b: __m128) -> __m128 { unsafe { let even = simd_shuffle!(a, b, [0, 2, 4, 6]); let odd = simd_shuffle!(a, b, [1, 3, 5, 7]); @@ -82,7 +86,8 @@ pub fn _mm_hadd_ps(a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "sse3")] #[cfg_attr(test, assert_instr(hsubpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_hsub_pd(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_hsub_pd(a: __m128d, b: __m128d) -> __m128d { unsafe { let even = simd_shuffle!(a, b, [0, 2]); let odd = simd_shuffle!(a, b, [1, 3]); @@ -98,7 +103,8 @@ pub fn _mm_hsub_pd(a: __m128d, b: __m128d) -> __m128d { #[target_feature(enable = "sse3")] #[cfg_attr(test, assert_instr(hsubps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_hsub_ps(a: __m128, b: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_hsub_ps(a: __m128, b: __m128) -> __m128 { unsafe { let even = simd_shuffle!(a, b, [0, 2, 4, 6]); let odd = simd_shuffle!(a, b, [1, 3, 5, 7]); @@ -127,7 +133,8 @@ pub unsafe fn _mm_lddqu_si128(mem_addr: *const __m128i) -> __m128i { #[target_feature(enable = "sse3")] #[cfg_attr(test, assert_instr(movddup))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_movedup_pd(a: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = 
"149298")] +pub const fn _mm_movedup_pd(a: __m128d) -> __m128d { unsafe { simd_shuffle!(a, a, [0, 0]) } } @@ -139,7 +146,8 @@ pub fn _mm_movedup_pd(a: __m128d) -> __m128d { #[target_feature(enable = "sse3")] #[cfg_attr(test, assert_instr(movddup))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub unsafe fn _mm_loaddup_pd(mem_addr: *const f64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const unsafe fn _mm_loaddup_pd(mem_addr: *const f64) -> __m128d { _mm_load1_pd(mem_addr) } @@ -151,7 +159,8 @@ pub unsafe fn _mm_loaddup_pd(mem_addr: *const f64) -> __m128d { #[target_feature(enable = "sse3")] #[cfg_attr(test, assert_instr(movshdup))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_movehdup_ps(a: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_movehdup_ps(a: __m128) -> __m128 { unsafe { simd_shuffle!(a, a, [1, 1, 3, 3]) } } @@ -163,7 +172,8 @@ pub fn _mm_movehdup_ps(a: __m128) -> __m128 { #[target_feature(enable = "sse3")] #[cfg_attr(test, assert_instr(movsldup))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_moveldup_ps(a: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_moveldup_ps(a: __m128) -> __m128 { unsafe { simd_shuffle!(a, a, [0, 0, 2, 2]) } } @@ -175,12 +185,13 @@ unsafe extern "C" { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; use crate::core_arch::x86::*; #[simd_test(enable = "sse3")] - unsafe fn test_mm_addsub_ps() { + const unsafe fn test_mm_addsub_ps() { let a = _mm_setr_ps(-1.0, 5.0, 0.0, -10.0); let b = _mm_setr_ps(-100.0, 20.0, 0.0, -5.0); let r = _mm_addsub_ps(a, b); @@ -188,7 +199,7 @@ mod tests { } #[simd_test(enable = "sse3")] - unsafe fn test_mm_addsub_pd() { + const unsafe fn test_mm_addsub_pd() { let a = _mm_setr_pd(-1.0, 5.0); let b = _mm_setr_pd(-100.0, 20.0); let r = _mm_addsub_pd(a, b); @@ -196,7 +207,7 @@ mod tests { } #[simd_test(enable = "sse3")] - unsafe fn test_mm_hadd_pd() { + const unsafe fn test_mm_hadd_pd() { let a = _mm_setr_pd(-1.0, 5.0); let b = _mm_setr_pd(-100.0, 20.0); let r = _mm_hadd_pd(a, b); @@ -204,7 +215,7 @@ mod tests { } #[simd_test(enable = "sse3")] - unsafe fn test_mm_hadd_ps() { + const unsafe fn test_mm_hadd_ps() { let a = _mm_setr_ps(-1.0, 5.0, 0.0, -10.0); let b = _mm_setr_ps(-100.0, 20.0, 0.0, -5.0); let r = _mm_hadd_ps(a, b); @@ -212,7 +223,7 @@ mod tests { } #[simd_test(enable = "sse3")] - unsafe fn test_mm_hsub_pd() { + const unsafe fn test_mm_hsub_pd() { let a = _mm_setr_pd(-1.0, 5.0); let b = _mm_setr_pd(-100.0, 20.0); let r = _mm_hsub_pd(a, b); @@ -220,7 +231,7 @@ mod tests { } #[simd_test(enable = "sse3")] - unsafe fn test_mm_hsub_ps() { + const unsafe fn test_mm_hsub_ps() { let a = _mm_setr_ps(-1.0, 5.0, 0.0, -10.0); let b = _mm_setr_ps(-100.0, 20.0, 0.0, -5.0); let r = _mm_hsub_ps(a, b); @@ -241,28 +252,28 @@ mod tests { } #[simd_test(enable = "sse3")] - unsafe fn test_mm_movedup_pd() { + const unsafe fn test_mm_movedup_pd() { let a = _mm_setr_pd(-1.0, 5.0); let r = _mm_movedup_pd(a); assert_eq_m128d(r, _mm_setr_pd(-1.0, -1.0)); } #[simd_test(enable = "sse3")] - unsafe fn test_mm_movehdup_ps() { + const unsafe fn test_mm_movehdup_ps() { let a = _mm_setr_ps(-1.0, 5.0, 0.0, -10.0); let r = _mm_movehdup_ps(a); assert_eq_m128(r, _mm_setr_ps(5.0, 5.0, -10.0, -10.0)); } #[simd_test(enable = "sse3")] - unsafe fn test_mm_moveldup_ps() { + const unsafe fn 
test_mm_moveldup_ps() { let a = _mm_setr_ps(-1.0, 5.0, 0.0, -10.0); let r = _mm_moveldup_ps(a); assert_eq_m128(r, _mm_setr_ps(-1.0, -1.0, 0.0, 0.0)); } #[simd_test(enable = "sse3")] - unsafe fn test_mm_loaddup_pd() { + const unsafe fn test_mm_loaddup_pd() { let d = -5.0; let r = _mm_loaddup_pd(&d); assert_eq_m128d(r, _mm_setr_pd(d, d)); diff --git a/crates/core_arch/src/x86/sse41.rs b/crates/core_arch/src/x86/sse41.rs index 6c16ee7600..a83f8cc747 100644 --- a/crates/core_arch/src/x86/sse41.rs +++ b/crates/core_arch/src/x86/sse41.rs @@ -59,7 +59,8 @@ pub const _MM_FROUND_NEARBYINT: i32 = _MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTI #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pblendvb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_blendv_epi8(a: __m128i, b: __m128i, mask: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_blendv_epi8(a: __m128i, b: __m128i, mask: __m128i) -> __m128i { unsafe { let mask: i8x16 = simd_lt(mask.as_i8x16(), i8x16::ZERO); transmute(simd_select(mask, b.as_i8x16(), a.as_i8x16())) @@ -78,7 +79,8 @@ pub fn _mm_blendv_epi8(a: __m128i, b: __m128i, mask: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pblendw, IMM8 = 0xB1))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_blend_epi16<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_blend_epi16<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); unsafe { transmute::<i16x8, _>(simd_shuffle!( @@ -106,7 +108,8 @@ pub fn _mm_blend_epi16<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(blendvpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_blendv_pd(a: __m128d, b: __m128d, mask: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_blendv_pd(a: __m128d, b: __m128d, mask: __m128d) -> __m128d { unsafe { let mask: i64x2 = simd_lt(transmute::<_, i64x2>(mask), i64x2::ZERO); transmute(simd_select(mask, b.as_f64x2(), a.as_f64x2())) @@ -121,7 +124,8 @@ pub fn _mm_blendv_pd(a: __m128d, b: __m128d, mask: __m128d) -> __m128d { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(blendvps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_blendv_ps(a: __m128, b: __m128, mask: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_blendv_ps(a: __m128, b: __m128, mask: __m128) -> __m128 { unsafe { let mask: i32x4 = simd_lt(transmute::<_, i32x4>(mask), i32x4::ZERO); transmute(simd_select(mask, b.as_f32x4(), a.as_f32x4())) @@ -140,7 +144,8 @@ pub fn _mm_blendv_ps(a: __m128, b: __m128, mask: __m128) -> __m128 { #[cfg_attr(test, assert_instr(blendps, IMM2 = 0b10))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_blend_pd<const IMM2: i32>(a: __m128d, b: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_blend_pd<const IMM2: i32>(a: __m128d, b: __m128d) -> __m128d { static_assert_uimm_bits!(IMM2, 2); unsafe { transmute::<f64x2, _>(simd_shuffle!( @@ -160,7 +165,8 @@ pub fn _mm_blend_pd<const IMM2: i32>(a: __m128d, b: __m128d) -> __m128d { #[cfg_attr(test, assert_instr(blendps, IMM4 = 0b0101))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_blend_ps<const IMM4: i32>(a: __m128, b: __m128) -> __m128 { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_blend_ps<const IMM4: i32>(a: __m128, b: __m128) -> __m128 { static_assert_uimm_bits!(IMM4, 4); unsafe { transmute::<f32x4, _>(simd_shuffle!( @@ -207,7 +213,8 @@ pub fn _mm_blend_ps<const IMM4: i32>(a: __m128, b: __m128) -> __m128 { #[cfg_attr(test, assert_instr(extractps, IMM8 = 0))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_extract_ps<const IMM8: i32>(a: __m128) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_extract_ps<const IMM8: i32>(a: __m128) -> i32 { static_assert_uimm_bits!(IMM8, 2); unsafe { simd_extract!(a, IMM8 as u32, f32).to_bits() as i32 } } @@ -223,7 +230,8 @@ pub fn _mm_extract_ps<const IMM8: i32>(a: __m128) -> i32 { #[cfg_attr(test, assert_instr(pextrb, IMM8 = 0))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_extract_epi8<const IMM8: i32>(a: __m128i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_extract_epi8<const IMM8: i32>(a: __m128i) -> i32 { static_assert_uimm_bits!(IMM8, 4); unsafe { simd_extract!(a.as_u8x16(), IMM8 as u32, u8) as i32 } } @@ -236,7 +244,8 @@ pub fn _mm_extract_epi8<const IMM8: i32>(a: __m128i) -> i32 { #[cfg_attr(test, assert_instr(extractps, IMM8 = 1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_extract_epi32<const IMM8: i32>(a: __m128i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_extract_epi32<const IMM8: i32>(a: __m128i) -> i32 { static_assert_uimm_bits!(IMM8, 2); unsafe { simd_extract!(a.as_i32x4(), IMM8 as u32, i32) } } @@ -284,7 +293,8 @@ pub fn _mm_insert_ps<const IMM8: i32>(a: __m128, b: __m128) -> __m128 { #[cfg_attr(test, assert_instr(pinsrb, IMM8 = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_insert_epi8<const IMM8: i32>(a: __m128i, i: i32) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_insert_epi8<const IMM8: i32>(a: __m128i, i: i32) -> __m128i { static_assert_uimm_bits!(IMM8, 4); unsafe { transmute(simd_insert!(a.as_i8x16(), IMM8 as u32, i as i8)) } } @@ -298,7 +308,8 @@ pub fn _mm_insert_epi8<const IMM8: i32>(a: __m128i, i: i32) -> __m128i { #[cfg_attr(test, assert_instr(pinsrd, IMM8 = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_insert_epi32<const IMM8: i32>(a: __m128i, i: i32) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_insert_epi32<const IMM8: i32>(a: __m128i, i: i32) -> __m128i { static_assert_uimm_bits!(IMM8, 2); unsafe { transmute(simd_insert!(a.as_i32x4(), IMM8 as u32, i)) } } @@ -311,7 +322,8 @@ pub fn _mm_insert_epi32<const IMM8: i32>(a: __m128i, i: i32) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pmaxsb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_max_epi8(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_max_epi8(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_imax(a.as_i8x16(), b.as_i8x16()).as_m128i() } } @@ -323,7 +335,8 @@ pub fn _mm_max_epi8(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pmaxuw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_max_epu16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_max_epu16(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_imax(a.as_u16x8(), 
b.as_u16x8()).as_m128i() } } @@ -335,7 +348,8 @@ pub fn _mm_max_epu16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pmaxsd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_max_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_max_epi32(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_imax(a.as_i32x4(), b.as_i32x4()).as_m128i() } } @@ -347,7 +361,8 @@ pub fn _mm_max_epi32(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pmaxud))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_max_epu32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_max_epu32(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_imax(a.as_u32x4(), b.as_u32x4()).as_m128i() } } @@ -359,7 +374,8 @@ pub fn _mm_max_epu32(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pminsb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_min_epi8(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_min_epi8(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_imin(a.as_i8x16(), b.as_i8x16()).as_m128i() } } @@ -371,7 +387,8 @@ pub fn _mm_min_epi8(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pminuw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_min_epu16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_min_epu16(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_imin(a.as_u16x8(), b.as_u16x8()).as_m128i() } } @@ -383,7 +400,8 @@ pub fn _mm_min_epu16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pminsd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_min_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_min_epi32(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_imin(a.as_i32x4(), b.as_i32x4()).as_m128i() } } @@ -395,7 +413,8 @@ pub fn _mm_min_epi32(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pminud))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_min_epu32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_min_epu32(a: __m128i, b: __m128i) -> __m128i { unsafe { simd_imin(a.as_u32x4(), b.as_u32x4()).as_m128i() } } @@ -418,7 +437,8 @@ pub fn _mm_packus_epi32(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pcmpeqq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cmpeq_epi64(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpeq_epi64(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_eq::<_, i64x2>(a.as_i64x2(), b.as_i64x2())) } } @@ -429,7 +449,8 @@ pub fn _mm_cmpeq_epi64(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pmovsxbw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtepi8_epi16(a: __m128i) -> 
__m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtepi8_epi16(a: __m128i) -> __m128i { unsafe { let a = a.as_i8x16(); let a: i8x8 = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); @@ -444,7 +465,8 @@ pub fn _mm_cvtepi8_epi16(a: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pmovsxbd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtepi8_epi32(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtepi8_epi32(a: __m128i) -> __m128i { unsafe { let a = a.as_i8x16(); let a: i8x4 = simd_shuffle!(a, a, [0, 1, 2, 3]); @@ -460,7 +482,8 @@ pub fn _mm_cvtepi8_epi32(a: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pmovsxbq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtepi8_epi64(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtepi8_epi64(a: __m128i) -> __m128i { unsafe { let a = a.as_i8x16(); let a: i8x2 = simd_shuffle!(a, a, [0, 1]); @@ -475,7 +498,8 @@ pub fn _mm_cvtepi8_epi64(a: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pmovsxwd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtepi16_epi32(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtepi16_epi32(a: __m128i) -> __m128i { unsafe { let a = a.as_i16x8(); let a: i16x4 = simd_shuffle!(a, a, [0, 1, 2, 3]); @@ -490,7 +514,8 @@ pub fn _mm_cvtepi16_epi32(a: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pmovsxwq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtepi16_epi64(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtepi16_epi64(a: __m128i) -> __m128i { unsafe { let a = a.as_i16x8(); let a: i16x2 = simd_shuffle!(a, a, [0, 1]); @@ -505,7 +530,8 @@ pub fn _mm_cvtepi16_epi64(a: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pmovsxdq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtepi32_epi64(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtepi32_epi64(a: __m128i) -> __m128i { unsafe { let a = a.as_i32x4(); let a: i32x2 = simd_shuffle!(a, a, [0, 1]); @@ -520,7 +546,8 @@ pub fn _mm_cvtepi32_epi64(a: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pmovzxbw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtepu8_epi16(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtepu8_epi16(a: __m128i) -> __m128i { unsafe { let a = a.as_u8x16(); let a: u8x8 = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); @@ -535,7 +562,8 @@ pub fn _mm_cvtepu8_epi16(a: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pmovzxbd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtepu8_epi32(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtepu8_epi32(a: __m128i) -> __m128i { unsafe { let a = a.as_u8x16(); let a: u8x4 = simd_shuffle!(a, a, [0, 1, 2, 3]); @@ -550,7 +578,8 @@ pub fn _mm_cvtepu8_epi32(a: __m128i) -> __m128i { 
#[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pmovzxbq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtepu8_epi64(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtepu8_epi64(a: __m128i) -> __m128i { unsafe { let a = a.as_u8x16(); let a: u8x2 = simd_shuffle!(a, a, [0, 1]); @@ -566,7 +595,8 @@ pub fn _mm_cvtepu8_epi64(a: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pmovzxwd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtepu16_epi32(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtepu16_epi32(a: __m128i) -> __m128i { unsafe { let a = a.as_u16x8(); let a: u16x4 = simd_shuffle!(a, a, [0, 1, 2, 3]); @@ -582,7 +612,8 @@ pub fn _mm_cvtepu16_epi32(a: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pmovzxwq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtepu16_epi64(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtepu16_epi64(a: __m128i) -> __m128i { unsafe { let a = a.as_u16x8(); let a: u16x2 = simd_shuffle!(a, a, [0, 1]); @@ -598,7 +629,8 @@ pub fn _mm_cvtepu16_epi64(a: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pmovzxdq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtepu32_epi64(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtepu32_epi64(a: __m128i) -> __m128i { unsafe { let a = a.as_u32x4(); let a: u32x2 = simd_shuffle!(a, a, [0, 1]); @@ -655,7 +687,8 @@ pub fn _mm_dp_ps(a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(roundpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_floor_pd(a: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_floor_pd(a: __m128d) -> __m128d { unsafe { simd_floor(a) } } @@ -668,7 +701,8 @@ pub fn _mm_floor_pd(a: __m128d) -> __m128d { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(roundps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_floor_ps(a: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_floor_ps(a: __m128) -> __m128 { unsafe { simd_floor(a) } } @@ -711,7 +745,8 @@ pub fn _mm_floor_ss(a: __m128, b: __m128) -> __m128 { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(roundpd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_ceil_pd(a: __m128d) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_ceil_pd(a: __m128d) -> __m128d { unsafe { simd_ceil(a) } } @@ -724,7 +759,8 @@ pub fn _mm_ceil_pd(a: __m128d) -> __m128d { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(roundps))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_ceil_ps(a: __m128) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_ceil_ps(a: __m128) -> __m128 { unsafe { simd_ceil(a) } } @@ -887,7 +923,8 @@ pub fn _mm_minpos_epu16(a: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pmuldq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn 
_mm_mul_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mul_epi32(a: __m128i, b: __m128i) -> __m128i { unsafe { let a = simd_cast::<_, i64x2>(simd_cast::<_, i32x2>(a.as_i64x2())); let b = simd_cast::<_, i64x2>(simd_cast::<_, i32x2>(b.as_i64x2())); @@ -907,7 +944,8 @@ pub fn _mm_mul_epi32(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(pmulld))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_mullo_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_mullo_epi32(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_mul(a.as_i32x4(), b.as_i32x4())) } } @@ -973,7 +1011,8 @@ pub fn _mm_mpsadbw_epu8(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(ptest))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_testz_si128(a: __m128i, mask: __m128i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_testz_si128(a: __m128i, mask: __m128i) -> i32 { unsafe { let r = simd_reduce_or(simd_and(a.as_i64x2(), mask.as_i64x2())); (0i64 == r) as i32 @@ -999,7 +1038,8 @@ pub fn _mm_testz_si128(a: __m128i, mask: __m128i) -> i32 { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(ptest))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_testc_si128(a: __m128i, mask: __m128i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_testc_si128(a: __m128i, mask: __m128i) -> i32 { unsafe { let r = simd_reduce_or(simd_and( simd_xor(a.as_i64x2(), i64x2::splat(!0)), @@ -1051,7 +1091,8 @@ pub fn _mm_testnzc_si128(a: __m128i, mask: __m128i) -> i32 { #[target_feature(enable = "sse4.1")] #[cfg_attr(test, assert_instr(ptest))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_test_all_zeros(a: __m128i, mask: __m128i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_test_all_zeros(a: __m128i, mask: __m128i) -> i32 { _mm_testz_si128(a, mask) } @@ -1073,7 +1114,8 @@ pub fn _mm_test_all_zeros(a: __m128i, mask: __m128i) -> i32 { #[cfg_attr(test, assert_instr(pcmpeqd))] #[cfg_attr(test, assert_instr(ptest))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_test_all_ones(a: __m128i) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_test_all_ones(a: __m128i) -> i32 { _mm_testc_si128(a, _mm_cmpeq_epi32(a, a)) } @@ -1148,12 +1190,13 @@ unsafe extern "C" { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use crate::core_arch::x86::*; use std::mem; use stdarch_test::simd_test; #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_blendv_epi8() { + const unsafe fn test_mm_blendv_epi8() { #[rustfmt::skip] let a = _mm_setr_epi8( 0, 1, 2, 3, 4, 5, 6, 7, @@ -1176,7 +1219,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_blendv_pd() { + const unsafe fn test_mm_blendv_pd() { let a = _mm_set1_pd(0.0); let b = _mm_set1_pd(1.0); let mask = transmute(_mm_setr_epi64x(0, -1)); @@ -1186,7 +1229,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_blendv_ps() { + const unsafe fn test_mm_blendv_ps() { let a = _mm_set1_ps(0.0); let b = _mm_set1_ps(1.0); let mask = transmute(_mm_setr_epi32(0, -1, 0, -1)); @@ -1196,7 +1239,7 @@ mod 
tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_blend_pd() { + const unsafe fn test_mm_blend_pd() { let a = _mm_set1_pd(0.0); let b = _mm_set1_pd(1.0); let r = _mm_blend_pd::<0b10>(a, b); @@ -1205,7 +1248,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_blend_ps() { + const unsafe fn test_mm_blend_ps() { let a = _mm_set1_ps(0.0); let b = _mm_set1_ps(1.0); let r = _mm_blend_ps::<0b1010>(a, b); @@ -1214,7 +1257,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_blend_epi16() { + const unsafe fn test_mm_blend_epi16() { let a = _mm_set1_epi16(0); let b = _mm_set1_epi16(1); let r = _mm_blend_epi16::<0b1010_1100>(a, b); @@ -1223,7 +1266,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_extract_ps() { + const unsafe fn test_mm_extract_ps() { let a = _mm_setr_ps(0.0, 1.0, 2.0, 3.0); let r: f32 = f32::from_bits(_mm_extract_ps::<1>(a) as u32); assert_eq!(r, 1.0); @@ -1232,7 +1275,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_extract_epi8() { + const unsafe fn test_mm_extract_epi8() { #[rustfmt::skip] let a = _mm_setr_epi8( -1, 1, 2, 3, 4, 5, 6, 7, @@ -1245,7 +1288,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_extract_epi32() { + const unsafe fn test_mm_extract_epi32() { let a = _mm_setr_epi32(0, 1, 2, 3); let r = _mm_extract_epi32::<1>(a); assert_eq!(r, 1); @@ -1270,7 +1313,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_insert_epi8() { + const unsafe fn test_mm_insert_epi8() { let a = _mm_set1_epi8(0); let e = _mm_setr_epi8(0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); let r = _mm_insert_epi8::<1>(a, 32); @@ -1281,7 +1324,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_insert_epi32() { + const unsafe fn test_mm_insert_epi32() { let a = _mm_set1_epi32(0); let e = _mm_setr_epi32(0, 32, 0, 0); let r = _mm_insert_epi32::<1>(a, 32); @@ -1292,7 +1335,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_max_epi8() { + const unsafe fn test_mm_max_epi8() { #[rustfmt::skip] let a = _mm_setr_epi8( 1, 4, 5, 8, 9, 12, 13, 16, @@ -1313,7 +1356,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_max_epu16() { + const unsafe fn test_mm_max_epu16() { let a = _mm_setr_epi16(1, 4, 5, 8, 9, 12, 13, 16); let b = _mm_setr_epi16(2, 3, 6, 7, 10, 11, 14, 15); let r = _mm_max_epu16(a, b); @@ -1322,7 +1365,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_max_epi32() { + const unsafe fn test_mm_max_epi32() { let a = _mm_setr_epi32(1, 4, 5, 8); let b = _mm_setr_epi32(2, 3, 6, 7); let r = _mm_max_epi32(a, b); @@ -1331,7 +1374,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_max_epu32() { + const unsafe fn test_mm_max_epu32() { let a = _mm_setr_epi32(1, 4, 5, 8); let b = _mm_setr_epi32(2, 3, 6, 7); let r = _mm_max_epu32(a, b); @@ -1340,7 +1383,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_min_epi8() { + const unsafe fn test_mm_min_epi8() { #[rustfmt::skip] let a = _mm_setr_epi8( 1, 4, 5, 8, 9, 12, 13, 16, @@ -1379,7 +1422,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_min_epu16() { + const unsafe fn test_mm_min_epu16() { let a = _mm_setr_epi16(1, 4, 5, 8, 9, 12, 13, 16); let b = _mm_setr_epi16(2, 3, 6, 7, 10, 11, 14, 15); let r = _mm_min_epu16(a, b); @@ -1388,7 +1431,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_min_epi32() { + const unsafe fn test_mm_min_epi32() { let a = _mm_setr_epi32(1, 4, 5, 8); 
let b = _mm_setr_epi32(2, 3, 6, 7); let r = _mm_min_epi32(a, b); @@ -1403,7 +1446,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_min_epu32() { + const unsafe fn test_mm_min_epu32() { let a = _mm_setr_epi32(1, 4, 5, 8); let b = _mm_setr_epi32(2, 3, 6, 7); let r = _mm_min_epu32(a, b); @@ -1421,7 +1464,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_cmpeq_epi64() { + const unsafe fn test_mm_cmpeq_epi64() { let a = _mm_setr_epi64x(0, 1); let b = _mm_setr_epi64x(0, 0); let r = _mm_cmpeq_epi64(a, b); @@ -1430,7 +1473,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_cvtepi8_epi16() { + const unsafe fn test_mm_cvtepi8_epi16() { let a = _mm_set1_epi8(10); let r = _mm_cvtepi8_epi16(a); let e = _mm_set1_epi16(10); @@ -1442,7 +1485,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_cvtepi8_epi32() { + const unsafe fn test_mm_cvtepi8_epi32() { let a = _mm_set1_epi8(10); let r = _mm_cvtepi8_epi32(a); let e = _mm_set1_epi32(10); @@ -1454,7 +1497,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_cvtepi8_epi64() { + const unsafe fn test_mm_cvtepi8_epi64() { let a = _mm_set1_epi8(10); let r = _mm_cvtepi8_epi64(a); let e = _mm_set1_epi64x(10); @@ -1466,7 +1509,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_cvtepi16_epi32() { + const unsafe fn test_mm_cvtepi16_epi32() { let a = _mm_set1_epi16(10); let r = _mm_cvtepi16_epi32(a); let e = _mm_set1_epi32(10); @@ -1478,7 +1521,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_cvtepi16_epi64() { + const unsafe fn test_mm_cvtepi16_epi64() { let a = _mm_set1_epi16(10); let r = _mm_cvtepi16_epi64(a); let e = _mm_set1_epi64x(10); @@ -1490,7 +1533,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_cvtepi32_epi64() { + const unsafe fn test_mm_cvtepi32_epi64() { let a = _mm_set1_epi32(10); let r = _mm_cvtepi32_epi64(a); let e = _mm_set1_epi64x(10); @@ -1502,7 +1545,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_cvtepu8_epi16() { + const unsafe fn test_mm_cvtepu8_epi16() { let a = _mm_set1_epi8(10); let r = _mm_cvtepu8_epi16(a); let e = _mm_set1_epi16(10); @@ -1510,7 +1553,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_cvtepu8_epi32() { + const unsafe fn test_mm_cvtepu8_epi32() { let a = _mm_set1_epi8(10); let r = _mm_cvtepu8_epi32(a); let e = _mm_set1_epi32(10); @@ -1518,7 +1561,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_cvtepu8_epi64() { + const unsafe fn test_mm_cvtepu8_epi64() { let a = _mm_set1_epi8(10); let r = _mm_cvtepu8_epi64(a); let e = _mm_set1_epi64x(10); @@ -1526,7 +1569,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_cvtepu16_epi32() { + const unsafe fn test_mm_cvtepu16_epi32() { let a = _mm_set1_epi16(10); let r = _mm_cvtepu16_epi32(a); let e = _mm_set1_epi32(10); @@ -1534,7 +1577,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_cvtepu16_epi64() { + const unsafe fn test_mm_cvtepu16_epi64() { let a = _mm_set1_epi16(10); let r = _mm_cvtepu16_epi64(a); let e = _mm_set1_epi64x(10); @@ -1542,7 +1585,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_cvtepu32_epi64() { + const unsafe fn test_mm_cvtepu32_epi64() { let a = _mm_set1_epi32(10); let r = _mm_cvtepu32_epi64(a); let e = _mm_set1_epi64x(10); @@ -1566,7 +1609,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_floor_pd() { + const unsafe fn test_mm_floor_pd() { let a = 
_mm_setr_pd(2.5, 4.5); let r = _mm_floor_pd(a); let e = _mm_setr_pd(2.0, 4.0); @@ -1574,7 +1617,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_floor_ps() { + const unsafe fn test_mm_floor_ps() { let a = _mm_setr_ps(2.5, 4.5, 8.5, 16.5); let r = _mm_floor_ps(a); let e = _mm_setr_ps(2.0, 4.0, 8.0, 16.0); @@ -1600,7 +1643,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_ceil_pd() { + const unsafe fn test_mm_ceil_pd() { let a = _mm_setr_pd(1.5, 3.5); let r = _mm_ceil_pd(a); let e = _mm_setr_pd(2.0, 4.0); @@ -1608,7 +1651,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_ceil_ps() { + const unsafe fn test_mm_ceil_ps() { let a = _mm_setr_ps(1.5, 3.5, 7.5, 15.5); let r = _mm_ceil_ps(a); let e = _mm_setr_ps(2.0, 4.0, 8.0, 16.0); @@ -1729,7 +1772,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_mul_epi32() { + const unsafe fn test_mm_mul_epi32() { { let a = _mm_setr_epi32(1, 1, 1, 1); let b = _mm_setr_epi32(1, 2, 3, 4); @@ -1750,7 +1793,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_mullo_epi32() { + const unsafe fn test_mm_mullo_epi32() { { let a = _mm_setr_epi32(1, 1, 1, 1); let b = _mm_setr_epi32(1, 2, 3, 4); @@ -1808,7 +1851,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_testz_si128() { + const unsafe fn test_mm_testz_si128() { let a = _mm_set1_epi8(1); let mask = _mm_set1_epi8(0); let r = _mm_testz_si128(a, mask); @@ -1824,7 +1867,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_testc_si128() { + const unsafe fn test_mm_testc_si128() { let a = _mm_set1_epi8(-1); let mask = _mm_set1_epi8(0); let r = _mm_testc_si128(a, mask); @@ -1860,7 +1903,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_test_all_zeros() { + const unsafe fn test_mm_test_all_zeros() { let a = _mm_set1_epi8(1); let mask = _mm_set1_epi8(0); let r = _mm_test_all_zeros(a, mask); @@ -1876,7 +1919,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_test_all_ones() { + const unsafe fn test_mm_test_all_ones() { let a = _mm_set1_epi8(-1); let r = _mm_test_all_ones(a); assert_eq!(r, 1); diff --git a/crates/core_arch/src/x86/sse42.rs b/crates/core_arch/src/x86/sse42.rs index 83c51f2b70..4d3c481fe1 100644 --- a/crates/core_arch/src/x86/sse42.rs +++ b/crates/core_arch/src/x86/sse42.rs @@ -563,7 +563,8 @@ pub fn _mm_crc32_u32(crc: u32, v: u32) -> u32 { #[target_feature(enable = "sse4.2")] #[cfg_attr(test, assert_instr(pcmpgtq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cmpgt_epi64(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cmpgt_epi64(a: __m128i, b: __m128i) -> __m128i { unsafe { transmute(simd_gt::<_, i64x2>(a.as_i64x2(), b.as_i64x2())) } } @@ -609,6 +610,7 @@ unsafe extern "C" { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; use crate::core_arch::x86::*; @@ -789,7 +791,7 @@ mod tests { } #[simd_test(enable = "sse4.2")] - unsafe fn test_mm_cmpgt_epi64() { + const unsafe fn test_mm_cmpgt_epi64() { let a = _mm_setr_epi64x(0, 0x2a); let b = _mm_set1_epi64x(0x00); let i = _mm_cmpgt_epi64(a, b); diff --git a/crates/core_arch/src/x86/ssse3.rs b/crates/core_arch/src/x86/ssse3.rs index ac067bd4b5..c44da6eb97 100644 --- a/crates/core_arch/src/x86/ssse3.rs +++ b/crates/core_arch/src/x86/ssse3.rs @@ -16,7 +16,8 @@ use stdarch_test::assert_instr; #[target_feature(enable = "ssse3")] 
#[cfg_attr(test, assert_instr(pabsb))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_abs_epi8(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_abs_epi8(a: __m128i) -> __m128i { unsafe { let a = a.as_i8x16(); let zero = i8x16::ZERO; @@ -34,7 +35,8 @@ pub fn _mm_abs_epi8(a: __m128i) -> __m128i { #[target_feature(enable = "ssse3")] #[cfg_attr(test, assert_instr(pabsw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_abs_epi16(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_abs_epi16(a: __m128i) -> __m128i { unsafe { let a = a.as_i16x8(); let zero = i16x8::ZERO; @@ -52,7 +54,8 @@ pub fn _mm_abs_epi16(a: __m128i) -> __m128i { #[target_feature(enable = "ssse3")] #[cfg_attr(test, assert_instr(pabsd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_abs_epi32(a: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_abs_epi32(a: __m128i) -> __m128i { unsafe { let a = a.as_i32x4(); let zero = i32x4::ZERO; @@ -104,7 +107,8 @@ pub fn _mm_shuffle_epi8(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(palignr, IMM8 = 15))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_alignr_epi8(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_alignr_epi8(a: __m128i, b: __m128i) -> __m128i { static_assert_uimm_bits!(IMM8, 8); // If palignr is shifting the pair of vectors more than the size of two // lanes, emit zero. @@ -163,7 +167,8 @@ pub fn _mm_alignr_epi8(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "ssse3")] #[cfg_attr(test, assert_instr(phaddw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_hadd_epi16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_hadd_epi16(a: __m128i, b: __m128i) -> __m128i { let a = a.as_i16x8(); let b = b.as_i16x8(); unsafe { @@ -194,7 +199,8 @@ pub fn _mm_hadds_epi16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "ssse3")] #[cfg_attr(test, assert_instr(phaddd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_hadd_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_hadd_epi32(a: __m128i, b: __m128i) -> __m128i { let a = a.as_i32x4(); let b = b.as_i32x4(); unsafe { @@ -212,7 +218,8 @@ pub fn _mm_hadd_epi32(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "ssse3")] #[cfg_attr(test, assert_instr(phsubw))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_hsub_epi16(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_hsub_epi16(a: __m128i, b: __m128i) -> __m128i { let a = a.as_i16x8(); let b = b.as_i16x8(); unsafe { @@ -244,7 +251,8 @@ pub fn _mm_hsubs_epi16(a: __m128i, b: __m128i) -> __m128i { #[target_feature(enable = "ssse3")] #[cfg_attr(test, assert_instr(phsubd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_hsub_epi32(a: __m128i, b: __m128i) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_hsub_epi32(a: __m128i, b: __m128i) -> __m128i { let a = a.as_i32x4(); let b = b.as_i32x4(); unsafe { @@ -353,24 +361,25 @@ 
unsafe extern "C" { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; use crate::core_arch::x86::*; #[simd_test(enable = "ssse3")] - unsafe fn test_mm_abs_epi8() { + const unsafe fn test_mm_abs_epi8() { let r = _mm_abs_epi8(_mm_set1_epi8(-5)); assert_eq_m128i(r, _mm_set1_epi8(5)); } #[simd_test(enable = "ssse3")] - unsafe fn test_mm_abs_epi16() { + const unsafe fn test_mm_abs_epi16() { let r = _mm_abs_epi16(_mm_set1_epi16(-5)); assert_eq_m128i(r, _mm_set1_epi16(5)); } #[simd_test(enable = "ssse3")] - unsafe fn test_mm_abs_epi32() { + const unsafe fn test_mm_abs_epi32() { let r = _mm_abs_epi32(_mm_set1_epi32(-5)); assert_eq_m128i(r, _mm_set1_epi32(5)); } @@ -400,7 +409,7 @@ mod tests { } #[simd_test(enable = "ssse3")] - unsafe fn test_mm_alignr_epi8() { + const unsafe fn test_mm_alignr_epi8() { #[rustfmt::skip] let a = _mm_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, @@ -440,7 +449,7 @@ mod tests { } #[simd_test(enable = "ssse3")] - unsafe fn test_mm_hadd_epi16() { + const unsafe fn test_mm_hadd_epi16() { let a = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm_setr_epi16(4, 128, 4, 3, 24, 12, 6, 19); let expected = _mm_setr_epi16(3, 7, 11, 15, 132, 7, 36, 25); @@ -490,7 +499,7 @@ mod tests { } #[simd_test(enable = "ssse3")] - unsafe fn test_mm_hadd_epi32() { + const unsafe fn test_mm_hadd_epi32() { let a = _mm_setr_epi32(1, 2, 3, 4); let b = _mm_setr_epi32(4, 128, 4, 3); let expected = _mm_setr_epi32(3, 7, 132, 7); @@ -506,7 +515,7 @@ mod tests { } #[simd_test(enable = "ssse3")] - unsafe fn test_mm_hsub_epi16() { + const unsafe fn test_mm_hsub_epi16() { let a = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm_setr_epi16(4, 128, 4, 3, 24, 12, 6, 19); let expected = _mm_setr_epi16(-1, -1, -1, -1, -124, 1, 12, -13); @@ -556,7 +565,7 @@ mod tests { } #[simd_test(enable = "ssse3")] - unsafe fn test_mm_hsub_epi32() { + const unsafe fn test_mm_hsub_epi32() { let a = _mm_setr_epi32(1, 2, 3, 4); let b = _mm_setr_epi32(4, 128, 4, 3); let expected = _mm_setr_epi32(-1, -1, -124, 1); diff --git a/crates/core_arch/src/x86/tbm.rs b/crates/core_arch/src/x86/tbm.rs index 5a01752d8a..d7769b916d 100644 --- a/crates/core_arch/src/x86/tbm.rs +++ b/crates/core_arch/src/x86/tbm.rs @@ -42,7 +42,8 @@ pub fn _bextri_u32(a: u32) -> u32 { #[target_feature(enable = "tbm")] #[cfg_attr(test, assert_instr(blcfill))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blcfill_u32(x: u32) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blcfill_u32(x: u32) -> u32 { x & (x.wrapping_add(1)) } @@ -53,7 +54,8 @@ pub fn _blcfill_u32(x: u32) -> u32 { #[target_feature(enable = "tbm")] #[cfg_attr(test, assert_instr(blci))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blci_u32(x: u32) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blci_u32(x: u32) -> u32 { x | !x.wrapping_add(1) } @@ -64,7 +66,8 @@ pub fn _blci_u32(x: u32) -> u32 { #[target_feature(enable = "tbm")] #[cfg_attr(test, assert_instr(blcic))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blcic_u32(x: u32) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blcic_u32(x: u32) -> u32 { !x & x.wrapping_add(1) } @@ -76,7 +79,8 @@ pub fn _blcic_u32(x: u32) -> u32 { #[target_feature(enable = "tbm")] #[cfg_attr(test, assert_instr(blcmsk))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blcmsk_u32(x: u32) -> u32 { 
+#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blcmsk_u32(x: u32) -> u32 { x ^ x.wrapping_add(1) } @@ -87,7 +91,8 @@ pub fn _blcmsk_u32(x: u32) -> u32 { #[target_feature(enable = "tbm")] #[cfg_attr(test, assert_instr(blcs))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blcs_u32(x: u32) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blcs_u32(x: u32) -> u32 { x | x.wrapping_add(1) } @@ -98,7 +103,8 @@ pub fn _blcs_u32(x: u32) -> u32 { #[target_feature(enable = "tbm")] #[cfg_attr(test, assert_instr(blsfill))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blsfill_u32(x: u32) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blsfill_u32(x: u32) -> u32 { x | x.wrapping_sub(1) } @@ -109,7 +115,8 @@ pub fn _blsfill_u32(x: u32) -> u32 { #[target_feature(enable = "tbm")] #[cfg_attr(test, assert_instr(blsic))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blsic_u32(x: u32) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blsic_u32(x: u32) -> u32 { !x | x.wrapping_sub(1) } @@ -121,7 +128,8 @@ pub fn _blsic_u32(x: u32) -> u32 { #[target_feature(enable = "tbm")] #[cfg_attr(test, assert_instr(t1mskc))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _t1mskc_u32(x: u32) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _t1mskc_u32(x: u32) -> u32 { !x | x.wrapping_add(1) } @@ -133,12 +141,14 @@ pub fn _t1mskc_u32(x: u32) -> u32 { #[target_feature(enable = "tbm")] #[cfg_attr(test, assert_instr(tzmsk))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _tzmsk_u32(x: u32) -> u32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _tzmsk_u32(x: u32) -> u32 { !x & x.wrapping_sub(1) } #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; use crate::core_arch::x86::*; @@ -149,13 +159,13 @@ mod tests { } #[simd_test(enable = "tbm")] - unsafe fn test_blcfill_u32() { + const unsafe fn test_blcfill_u32() { assert_eq!(_blcfill_u32(0b0101_0111u32), 0b0101_0000u32); assert_eq!(_blcfill_u32(0b1111_1111u32), 0u32); } #[simd_test(enable = "tbm")] - unsafe fn test_blci_u32() { + const unsafe fn test_blci_u32() { assert_eq!( _blci_u32(0b0101_0000u32), 0b1111_1111_1111_1111_1111_1111_1111_1110u32 @@ -167,25 +177,25 @@ mod tests { } #[simd_test(enable = "tbm")] - unsafe fn test_blcic_u32() { + const unsafe fn test_blcic_u32() { assert_eq!(_blcic_u32(0b0101_0001u32), 0b0000_0010u32); assert_eq!(_blcic_u32(0b1111_1111u32), 0b1_0000_0000u32); } #[simd_test(enable = "tbm")] - unsafe fn test_blcmsk_u32() { + const unsafe fn test_blcmsk_u32() { assert_eq!(_blcmsk_u32(0b0101_0001u32), 0b0000_0011u32); assert_eq!(_blcmsk_u32(0b1111_1111u32), 0b1_1111_1111u32); } #[simd_test(enable = "tbm")] - unsafe fn test_blcs_u32() { + const unsafe fn test_blcs_u32() { assert_eq!(_blcs_u32(0b0101_0001u32), 0b0101_0011u32); assert_eq!(_blcs_u32(0b1111_1111u32), 0b1_1111_1111u32); } #[simd_test(enable = "tbm")] - unsafe fn test_blsfill_u32() { + const unsafe fn test_blsfill_u32() { assert_eq!(_blsfill_u32(0b0101_0100u32), 0b0101_0111u32); assert_eq!( _blsfill_u32(0u32), @@ -194,7 +204,7 @@ mod tests { } #[simd_test(enable = "tbm")] - unsafe fn test_blsic_u32() { + const unsafe fn test_blsic_u32() { assert_eq!( _blsic_u32(0b0101_0100u32), 
0b1111_1111_1111_1111_1111_1111_1111_1011u32 @@ -206,7 +216,7 @@ mod tests { } #[simd_test(enable = "tbm")] - unsafe fn test_t1mskc_u32() { + const unsafe fn test_t1mskc_u32() { assert_eq!( _t1mskc_u32(0b0101_0111u32), 0b1111_1111_1111_1111_1111_1111_1111_1000u32 @@ -218,7 +228,7 @@ mod tests { } #[simd_test(enable = "tbm")] - unsafe fn test_tzmsk_u32() { + const unsafe fn test_tzmsk_u32() { assert_eq!(_tzmsk_u32(0b0101_1000u32), 0b0000_0111u32); assert_eq!(_tzmsk_u32(0b0101_1001u32), 0b0000_0000u32); } diff --git a/crates/core_arch/src/x86/test.rs b/crates/core_arch/src/x86/test.rs index fec25ce2bc..9fa88fc98d 100644 --- a/crates/core_arch/src/x86/test.rs +++ b/crates/core_arch/src/x86/test.rs @@ -1,115 +1,228 @@ //! Utilities used in testing the x86 intrinsics +use crate::core_arch::assert_eq_const as assert_eq; use crate::core_arch::x86::*; use std::mem::transmute; #[track_caller] #[target_feature(enable = "sse2")] -pub unsafe fn assert_eq_m128i(a: __m128i, b: __m128i) { +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub(crate) const unsafe fn assert_eq_m128i(a: __m128i, b: __m128i) { assert_eq!(transmute::<_, [u64; 2]>(a), transmute::<_, [u64; 2]>(b)) } #[track_caller] -#[target_feature(enable = "sse2")] -pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) { - if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 { - panic!("{:?} != {:?}", a, b); - } +#[target_feature(enable = "avx")] +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub(crate) const unsafe fn assert_eq_m256i(a: __m256i, b: __m256i) { + assert_eq!(transmute::<_, [u64; 4]>(a), transmute::<_, [u64; 4]>(b)) } -#[target_feature(enable = "sse2")] -pub unsafe fn get_m128d(a: __m128d, idx: usize) -> f64 { - transmute::<_, [f64; 2]>(a)[idx] +#[track_caller] +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub(crate) const unsafe fn assert_eq_m512i(a: __m512i, b: __m512i) { + assert_eq!(transmute::<_, [i32; 16]>(a), transmute::<_, [i32; 16]>(b)) } -#[track_caller] -#[target_feature(enable = "sse")] -pub unsafe fn assert_eq_m128(a: __m128, b: __m128) { - let r = _mm_cmpeq_ps(a, b); - if _mm_movemask_ps(r) != 0b1111 { - panic!("{:?} != {:?}", a, b); +macro_rules! make_ct_rt { + ($( + $( #[$meta:meta] )* + $vis:vis unsafe fn $name:ident ($($param:ident : $type:ty),* $(,)?) { + ct: $ct:expr; + rt: $rt:expr; + } + )*) => {$( + $( #[$meta] )* + #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] + $vis const unsafe fn $name ($($param : $type),*) { + #[inline(always)] + #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] + const fn ct($($param : $type),*) { + $ct + } + #[inline(always)] + fn rt($($param : $type),*) { + $rt + } + + $crate::intrinsics::const_eval_select(($($param),*), ct, rt) + } + )*} +} + +make_ct_rt! 
{ + // SAFETY: we can use simple float equality because when this should only be used in const + // context where Intel peculiarities don't appear + + #[track_caller] + #[target_feature(enable = "sse2")] + pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) { + ct: unsafe { + assert_eq!(transmute::<_, [f64; 2]>(a), transmute::<_, [f64; 2]>(b)) + }; + rt: unsafe { + if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 { + panic!("{:?} != {:?}", a, b); + } + }; } -} -#[target_feature(enable = "sse")] -pub unsafe fn get_m128(a: __m128, idx: usize) -> f32 { - transmute::<_, [f32; 4]>(a)[idx] -} + #[track_caller] + #[target_feature(enable = "sse")] + pub unsafe fn assert_eq_m128(a: __m128, b: __m128) { + ct: unsafe { + assert_eq!(transmute::<_, [f32; 4]>(a), transmute::<_, [f32; 4]>(b)) + }; + rt: unsafe { + let r = _mm_cmpeq_ps(a, b); + if _mm_movemask_ps(r) != 0b1111 { + panic!("{:?} != {:?}", a, b); + } + }; + } -#[track_caller] -#[target_feature(enable = "avx512fp16,avx512vl")] -pub unsafe fn assert_eq_m128h(a: __m128h, b: __m128h) { - let r = _mm_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); - if r != 0b1111_1111 { - panic!("{:?} != {:?}", a, b); + #[track_caller] + #[target_feature(enable = "avx512fp16,avx512vl")] + pub unsafe fn assert_eq_m128h(a: __m128h, b: __m128h) { + ct: unsafe { + assert_eq!(transmute::<_, [f16; 8]>(a), transmute::<_, [f16; 8]>(b)) + }; + rt: unsafe { + let r = _mm_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); + if r != 0b1111_1111 { + panic!("{:?} != {:?}", a, b); + } + }; } -} -// not actually an intrinsic but useful in various tests as we proted from -// `i64x2::new` which is backwards from `_mm_set_epi64x` -#[target_feature(enable = "sse2")] -pub unsafe fn _mm_setr_epi64x(a: i64, b: i64) -> __m128i { - _mm_set_epi64x(b, a) -} + #[track_caller] + #[target_feature(enable = "avx")] + pub unsafe fn assert_eq_m256d(a: __m256d, b: __m256d) { + ct: unsafe { + assert_eq!(transmute::<_, [f64; 4]>(a), transmute::<_, [f64; 4]>(b)) + }; + rt: unsafe { + let cmp = _mm256_cmp_pd::<_CMP_EQ_OQ>(a, b); + if _mm256_movemask_pd(cmp) != 0b1111 { + panic!("{:?} != {:?}", a, b); + } + }; + } -#[track_caller] -#[target_feature(enable = "avx")] -pub unsafe fn assert_eq_m256i(a: __m256i, b: __m256i) { - assert_eq!(transmute::<_, [u64; 4]>(a), transmute::<_, [u64; 4]>(b)) -} + #[track_caller] + #[target_feature(enable = "avx")] + pub unsafe fn assert_eq_m256(a: __m256, b: __m256) { + ct: unsafe { + assert_eq!(transmute::<_, [f32; 8]>(a), transmute::<_, [f32; 8]>(b)) + }; + rt: unsafe { + let cmp = _mm256_cmp_ps::<_CMP_EQ_OQ>(a, b); + if _mm256_movemask_ps(cmp) != 0b11111111 { + panic!("{:?} != {:?}", a, b); + } + }; + } -#[track_caller] -#[target_feature(enable = "avx")] -pub unsafe fn assert_eq_m256d(a: __m256d, b: __m256d) { - let cmp = _mm256_cmp_pd::<_CMP_EQ_OQ>(a, b); - if _mm256_movemask_pd(cmp) != 0b1111 { - panic!("{:?} != {:?}", a, b); + #[track_caller] + #[target_feature(enable = "avx512fp16,avx512vl")] + pub unsafe fn assert_eq_m256h(a: __m256h, b: __m256h) { + ct: unsafe { + assert_eq!(transmute::<_, [f16; 16]>(a), transmute::<_, [f16; 16]>(b)) + }; + rt: unsafe { + let r = _mm256_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); + if r != 0b11111111_11111111 { + panic!("{:?} != {:?}", a, b); + } + }; + } + + #[track_caller] + #[target_feature(enable = "avx512f")] + pub unsafe fn assert_eq_m512d(a: __m512d, b: __m512d) { + ct: unsafe { + assert_eq!(transmute::<_, [f64; 8]>(a), transmute::<_, [f64; 8]>(b)) + }; + rt: unsafe { + let cmp = _mm512_cmp_pd_mask::<_CMP_EQ_OQ>(a, b); + if cmp != 0b11111111 { + panic!("{:?} != 
{:?}", a, b); + } + }; + } + + #[track_caller] + #[target_feature(enable = "avx512f")] + pub unsafe fn assert_eq_m512(a: __m512, b: __m512) { + ct: unsafe { + assert_eq!(transmute::<_, [f32; 16]>(a), transmute::<_, [f32; 16]>(b)) + }; + rt: unsafe { + let cmp = _mm512_cmp_ps_mask::<_CMP_EQ_OQ>(a, b); + if cmp != 0b11111111_11111111 { + panic!("{:?} != {:?}", a, b); + } + }; + } + + #[track_caller] + #[target_feature(enable = "avx512fp16")] + pub unsafe fn assert_eq_m512h(a: __m512h, b: __m512h) { + ct: unsafe { + assert_eq!(transmute::<_, [f16; 32]>(a), transmute::<_, [f16; 32]>(b)) + }; + rt: unsafe { + let r = _mm512_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); + if r != 0b11111111_11111111_11111111_11111111 { + panic!("{:?} != {:?}", a, b); + } + }; } } -#[target_feature(enable = "avx")] -pub unsafe fn get_m256d(a: __m256d, idx: usize) -> f64 { - transmute::<_, [f64; 4]>(a)[idx] +#[target_feature(enable = "sse2")] +pub(crate) const unsafe fn get_m128d(a: __m128d, idx: usize) -> f64 { + transmute::<_, [f64; 2]>(a)[idx] } -#[track_caller] -#[target_feature(enable = "avx")] -pub unsafe fn assert_eq_m256(a: __m256, b: __m256) { - let cmp = _mm256_cmp_ps::<_CMP_EQ_OQ>(a, b); - if _mm256_movemask_ps(cmp) != 0b11111111 { - panic!("{:?} != {:?}", a, b); - } +#[target_feature(enable = "sse")] +pub(crate) const unsafe fn get_m128(a: __m128, idx: usize) -> f32 { + transmute::<_, [f32; 4]>(a)[idx] } #[target_feature(enable = "avx")] -pub unsafe fn get_m256(a: __m256, idx: usize) -> f32 { - transmute::<_, [f32; 8]>(a)[idx] +pub(crate) const unsafe fn get_m256d(a: __m256d, idx: usize) -> f64 { + transmute::<_, [f64; 4]>(a)[idx] } -#[track_caller] -#[target_feature(enable = "avx512fp16,avx512vl")] -pub unsafe fn assert_eq_m256h(a: __m256h, b: __m256h) { - let r = _mm256_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); - if r != 0b11111111_11111111 { - panic!("{:?} != {:?}", a, b); - } +#[target_feature(enable = "avx")] +pub(crate) const unsafe fn get_m256(a: __m256, idx: usize) -> f32 { + transmute::<_, [f32; 8]>(a)[idx] } #[target_feature(enable = "avx512f")] -pub unsafe fn get_m512(a: __m512, idx: usize) -> f32 { +pub(crate) const unsafe fn get_m512(a: __m512, idx: usize) -> f32 { transmute::<_, [f32; 16]>(a)[idx] } #[target_feature(enable = "avx512f")] -pub unsafe fn get_m512d(a: __m512d, idx: usize) -> f64 { +pub(crate) const unsafe fn get_m512d(a: __m512d, idx: usize) -> f64 { transmute::<_, [f64; 8]>(a)[idx] } #[target_feature(enable = "avx512f")] -pub unsafe fn get_m512i(a: __m512i, idx: usize) -> i64 { +pub(crate) const unsafe fn get_m512i(a: __m512i, idx: usize) -> i64 { transmute::<_, [i64; 8]>(a)[idx] } +// not actually an intrinsic but useful in various tests as we ported from +// `i64x2::new` which is backwards from `_mm_set_epi64x` +#[target_feature(enable = "sse2")] +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub const unsafe fn _mm_setr_epi64x(a: i64, b: i64) -> __m128i { + _mm_set_epi64x(b, a) +} + // These intrinsics doesn't exist on x86 b/c it requires a 64-bit register, // which doesn't exist on x86! 
#[cfg(target_arch = "x86")] @@ -118,14 +231,16 @@ mod x86_polyfill { use crate::intrinsics::simd::*; #[rustc_legacy_const_generics(2)] - pub unsafe fn _mm_insert_epi64(a: __m128i, val: i64) -> __m128i { + #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] + pub const unsafe fn _mm_insert_epi64(a: __m128i, val: i64) -> __m128i { static_assert_uimm_bits!(INDEX, 1); transmute(simd_insert!(a.as_i64x2(), INDEX as u32, val)) } #[target_feature(enable = "avx2")] #[rustc_legacy_const_generics(2)] - pub unsafe fn _mm256_insert_epi64(a: __m256i, val: i64) -> __m256i { + #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] + pub const unsafe fn _mm256_insert_epi64(a: __m256i, val: i64) -> __m256i { static_assert_uimm_bits!(INDEX, 2); transmute(simd_insert!(a.as_i64x4(), INDEX as u32, val)) } @@ -136,33 +251,3 @@ mod x86_polyfill { pub use crate::core_arch::x86_64::{_mm_insert_epi64, _mm256_insert_epi64}; } pub use self::x86_polyfill::*; - -#[track_caller] -pub unsafe fn assert_eq_m512i(a: __m512i, b: __m512i) { - assert_eq!(transmute::<_, [i32; 16]>(a), transmute::<_, [i32; 16]>(b)) -} - -#[track_caller] -pub unsafe fn assert_eq_m512(a: __m512, b: __m512) { - let cmp = _mm512_cmp_ps_mask::<_CMP_EQ_OQ>(a, b); - if cmp != 0b11111111_11111111 { - panic!("{:?} != {:?}", a, b); - } -} - -#[track_caller] -pub unsafe fn assert_eq_m512d(a: __m512d, b: __m512d) { - let cmp = _mm512_cmp_pd_mask::<_CMP_EQ_OQ>(a, b); - if cmp != 0b11111111 { - panic!("{:?} != {:?}", a, b); - } -} - -#[track_caller] -#[target_feature(enable = "avx512fp16")] -pub unsafe fn assert_eq_m512h(a: __m512h, b: __m512h) { - let r = _mm512_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); - if r != 0b11111111_11111111_11111111_11111111 { - panic!("{:?} != {:?}", a, b); - } -} diff --git a/crates/core_arch/src/x86_64/abm.rs b/crates/core_arch/src/x86_64/abm.rs index bf59cc4632..63e422864d 100644 --- a/crates/core_arch/src/x86_64/abm.rs +++ b/crates/core_arch/src/x86_64/abm.rs @@ -29,7 +29,8 @@ use stdarch_test::assert_instr; #[target_feature(enable = "lzcnt")] #[cfg_attr(test, assert_instr(lzcnt))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _lzcnt_u64(x: u64) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _lzcnt_u64(x: u64) -> u64 { x.leading_zeros() as u64 } @@ -40,23 +41,25 @@ pub fn _lzcnt_u64(x: u64) -> u64 { #[target_feature(enable = "popcnt")] #[cfg_attr(test, assert_instr(popcnt))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _popcnt64(x: i64) -> i32 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _popcnt64(x: i64) -> i32 { x.count_ones() as i32 } #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; use crate::core_arch::arch::x86_64::*; #[simd_test(enable = "lzcnt")] - unsafe fn test_lzcnt_u64() { + const unsafe fn test_lzcnt_u64() { assert_eq!(_lzcnt_u64(0b0101_1010), 57); } #[simd_test(enable = "popcnt")] - unsafe fn test_popcnt64() { + const unsafe fn test_popcnt64() { assert_eq!(_popcnt64(0b0101_1010), 4); } } diff --git a/crates/core_arch/src/x86_64/avx.rs b/crates/core_arch/src/x86_64/avx.rs index b494385e4a..bb8d256207 100644 --- a/crates/core_arch/src/x86_64/avx.rs +++ b/crates/core_arch/src/x86_64/avx.rs @@ -24,7 +24,8 @@ use crate::{core_arch::x86::*, mem::transmute}; #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. 
#[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_insert_epi64(a: __m256i, i: i64) -> __m256i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_insert_epi64(a: __m256i, i: i64) -> __m256i { static_assert_uimm_bits!(INDEX, 2); unsafe { transmute(simd_insert!(a.as_i64x4(), INDEX as u32, i)) } } @@ -37,19 +38,21 @@ pub fn _mm256_insert_epi64(a: __m256i, i: i64) -> __m256i { #[rustc_legacy_const_generics(1)] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm256_extract_epi64(a: __m256i) -> i64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm256_extract_epi64(a: __m256i) -> i64 { static_assert_uimm_bits!(INDEX, 2); unsafe { simd_extract!(a.as_i64x4(), INDEX as u32) } } #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; use crate::core_arch::arch::x86_64::*; #[simd_test(enable = "avx")] - unsafe fn test_mm256_insert_epi64() { + const unsafe fn test_mm256_insert_epi64() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let r = _mm256_insert_epi64::<3>(a, 0); let e = _mm256_setr_epi64x(1, 2, 3, 0); @@ -57,7 +60,7 @@ mod tests { } #[simd_test(enable = "avx")] - unsafe fn test_mm256_extract_epi64() { + const unsafe fn test_mm256_extract_epi64() { let a = _mm256_setr_epi64x(0, 1, 2, 3); let r = _mm256_extract_epi64::<3>(a); assert_eq!(r, 3); diff --git a/crates/core_arch/src/x86_64/avx512bw.rs b/crates/core_arch/src/x86_64/avx512bw.rs index 466c36ef31..a8e5183e0c 100644 --- a/crates/core_arch/src/x86_64/avx512bw.rs +++ b/crates/core_arch/src/x86_64/avx512bw.rs @@ -6,7 +6,8 @@ use crate::core_arch::x86::*; #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _cvtmask64_u64(a: __mmask64) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _cvtmask64_u64(a: __mmask64) -> u64 { a } @@ -16,19 +17,21 @@ pub fn _cvtmask64_u64(a: __mmask64) -> u64 { #[inline] #[target_feature(enable = "avx512bw")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] -pub fn _cvtu64_mask64(a: u64) -> __mmask64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _cvtu64_mask64(a: u64) -> __mmask64 { a } #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; use crate::core_arch::{x86::*, x86_64::*}; #[simd_test(enable = "avx512bw")] - unsafe fn test_cvtmask64_u64() { + const unsafe fn test_cvtmask64_u64() { let a: __mmask64 = 0b11001100_00110011_01100110_10011001; let r = _cvtmask64_u64(a); let e: u64 = 0b11001100_00110011_01100110_10011001; @@ -36,7 +39,7 @@ mod tests { } #[simd_test(enable = "avx512bw")] - unsafe fn test_cvtu64_mask64() { + const unsafe fn test_cvtu64_mask64() { let a: u64 = 0b11001100_00110011_01100110_10011001; let r = _cvtu64_mask64(a); let e: __mmask64 = 0b11001100_00110011_01100110_10011001; diff --git a/crates/core_arch/src/x86_64/avx512f.rs b/crates/core_arch/src/x86_64/avx512f.rs index a2656c8535..c85a5580c8 100644 --- a/crates/core_arch/src/x86_64/avx512f.rs +++ b/crates/core_arch/src/x86_64/avx512f.rs @@ -57,7 +57,8 @@ pub fn _mm_cvtsd_u64(a: __m128d) -> u64 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtsi2ss))] -pub fn _mm_cvti64_ss(a: __m128, b: i64) -> __m128 { +#[rustc_const_unstable(feature = 
"stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvti64_ss(a: __m128, b: i64) -> __m128 { unsafe { let b = b as f32; simd_insert!(a, 0, b) @@ -71,7 +72,8 @@ pub fn _mm_cvti64_ss(a: __m128, b: i64) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtsi2sd))] -pub fn _mm_cvti64_sd(a: __m128d, b: i64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvti64_sd(a: __m128d, b: i64) -> __m128d { unsafe { let b = b as f64; simd_insert!(a, 0, b) @@ -85,7 +87,8 @@ pub fn _mm_cvti64_sd(a: __m128d, b: i64) -> __m128d { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtusi2ss))] -pub fn _mm_cvtu64_ss(a: __m128, b: u64) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtu64_ss(a: __m128, b: u64) -> __m128 { unsafe { let b = b as f32; simd_insert!(a, 0, b) @@ -99,7 +102,8 @@ pub fn _mm_cvtu64_ss(a: __m128, b: u64) -> __m128 { #[target_feature(enable = "avx512f")] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvtusi2sd))] -pub fn _mm_cvtu64_sd(a: __m128d, b: u64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtu64_sd(a: __m128d, b: u64) -> __m128d { unsafe { let b = b as f64; simd_insert!(a, 0, b) @@ -554,6 +558,7 @@ unsafe extern "C" { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; @@ -562,7 +567,7 @@ mod tests { use crate::hint::black_box; #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_abs_epi64() { + const unsafe fn test_mm512_abs_epi64() { let a = _mm512_set_epi64(0, 1, -1, i64::MAX, i64::MIN, 100, -100, -32); let r = _mm512_abs_epi64(a); let e = _mm512_set_epi64(0, 1, 1, i64::MAX, i64::MAX.wrapping_add(1), 100, 100, 32); @@ -570,7 +575,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_abs_epi64() { + const unsafe fn test_mm512_mask_abs_epi64() { let a = _mm512_set_epi64(0, 1, -1, i64::MAX, i64::MIN, 100, -100, -32); let r = _mm512_mask_abs_epi64(a, 0, a); assert_eq_m512i(r, a); @@ -580,7 +585,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_abs_epi64() { + const unsafe fn test_mm512_maskz_abs_epi64() { let a = _mm512_set_epi64(0, 1, -1, i64::MAX, i64::MIN, 100, -100, -32); let r = _mm512_maskz_abs_epi64(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -590,7 +595,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_abs_epi64() { + const unsafe fn test_mm256_abs_epi64() { let a = _mm256_set_epi64x(i64::MAX, i64::MIN, 100, -100); let r = _mm256_abs_epi64(a); let e = _mm256_set_epi64x(i64::MAX, i64::MAX.wrapping_add(1), 100, 100); @@ -598,7 +603,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_abs_epi64() { + const unsafe fn test_mm256_mask_abs_epi64() { let a = _mm256_set_epi64x(i64::MAX, i64::MIN, 100, -100); let r = _mm256_mask_abs_epi64(a, 0, a); assert_eq_m256i(r, a); @@ -608,7 +613,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_abs_epi64() { + const unsafe fn test_mm256_maskz_abs_epi64() { let a = _mm256_set_epi64x(i64::MAX, i64::MIN, 100, -100); let r = _mm256_maskz_abs_epi64(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -618,7 +623,7 @@ mod tests { } 
#[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_abs_epi64() { + const unsafe fn test_mm_abs_epi64() { let a = _mm_set_epi64x(i64::MAX, i64::MIN); let r = _mm_abs_epi64(a); let e = _mm_set_epi64x(i64::MAX, i64::MAX.wrapping_add(1)); @@ -630,7 +635,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_abs_epi64() { + const unsafe fn test_mm_mask_abs_epi64() { let a = _mm_set_epi64x(i64::MAX, i64::MIN); let r = _mm_mask_abs_epi64(a, 0, a); assert_eq_m128i(r, a); @@ -644,7 +649,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_abs_epi64() { + const unsafe fn test_mm_maskz_abs_epi64() { let a = _mm_set_epi64x(i64::MAX, i64::MIN); let r = _mm_maskz_abs_epi64(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -658,7 +663,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_abs_pd() { + const unsafe fn test_mm512_abs_pd() { let a = _mm512_setr_pd(0., 1., -1., f64::MAX, f64::MIN, 100., -100., -32.); let r = _mm512_abs_pd(a); let e = _mm512_setr_pd(0., 1., 1., f64::MAX, f64::MAX, 100., 100., 32.); @@ -666,7 +671,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_abs_pd() { + const unsafe fn test_mm512_mask_abs_pd() { let a = _mm512_setr_pd(0., 1., -1., f64::MAX, f64::MIN, 100., -100., -32.); let r = _mm512_mask_abs_pd(a, 0, a); assert_eq_m512d(r, a); @@ -676,7 +681,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_mov_epi64() { + const unsafe fn test_mm512_mask_mov_epi64() { let src = _mm512_set1_epi64(1); let a = _mm512_set1_epi64(2); let r = _mm512_mask_mov_epi64(src, 0, a); @@ -686,7 +691,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_mov_epi64() { + const unsafe fn test_mm512_maskz_mov_epi64() { let a = _mm512_set1_epi64(2); let r = _mm512_maskz_mov_epi64(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -695,7 +700,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_mov_epi64() { + const unsafe fn test_mm256_mask_mov_epi64() { let src = _mm256_set1_epi64x(1); let a = _mm256_set1_epi64x(2); let r = _mm256_mask_mov_epi64(src, 0, a); @@ -705,7 +710,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_mov_epi64() { + const unsafe fn test_mm256_maskz_mov_epi64() { let a = _mm256_set1_epi64x(2); let r = _mm256_maskz_mov_epi64(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -714,7 +719,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_mov_epi64() { + const unsafe fn test_mm_mask_mov_epi64() { let src = _mm_set1_epi64x(1); let a = _mm_set1_epi64x(2); let r = _mm_mask_mov_epi64(src, 0, a); @@ -724,7 +729,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_mov_epi64() { + const unsafe fn test_mm_maskz_mov_epi64() { let a = _mm_set1_epi64x(2); let r = _mm_maskz_mov_epi64(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -733,7 +738,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_mov_pd() { + const unsafe fn test_mm512_mask_mov_pd() { let src = _mm512_set1_pd(1.); let a = _mm512_set1_pd(2.); let r = _mm512_mask_mov_pd(src, 0, a); @@ -743,7 +748,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_mov_pd() { + const unsafe fn test_mm512_maskz_mov_pd() { let a = _mm512_set1_pd(2.); let r = _mm512_maskz_mov_pd(0, a); assert_eq_m512d(r, _mm512_setzero_pd()); @@ -752,7 +757,7 @@ mod tests { } 
#[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_mov_pd() { + const unsafe fn test_mm256_mask_mov_pd() { let src = _mm256_set1_pd(1.); let a = _mm256_set1_pd(2.); let r = _mm256_mask_mov_pd(src, 0, a); @@ -762,7 +767,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_mov_pd() { + const unsafe fn test_mm256_maskz_mov_pd() { let a = _mm256_set1_pd(2.); let r = _mm256_maskz_mov_pd(0, a); assert_eq_m256d(r, _mm256_setzero_pd()); @@ -771,7 +776,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_mov_pd() { + const unsafe fn test_mm_mask_mov_pd() { let src = _mm_set1_pd(1.); let a = _mm_set1_pd(2.); let r = _mm_mask_mov_pd(src, 0, a); @@ -781,7 +786,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_mov_pd() { + const unsafe fn test_mm_maskz_mov_pd() { let a = _mm_set1_pd(2.); let r = _mm_maskz_mov_pd(0, a); assert_eq_m128d(r, _mm_setzero_pd()); @@ -790,7 +795,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_add_epi64() { + const unsafe fn test_mm512_add_epi64() { let a = _mm512_setr_epi64(0, 1, -1, i64::MAX, i64::MIN, 100, -100, -32); let b = _mm512_set1_epi64(1); let r = _mm512_add_epi64(a, b); @@ -799,7 +804,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_add_epi64() { + const unsafe fn test_mm512_mask_add_epi64() { let a = _mm512_setr_epi64(0, 1, -1, i64::MAX, i64::MIN, 100, -100, -32); let b = _mm512_set1_epi64(1); let r = _mm512_mask_add_epi64(a, 0, a, b); @@ -810,7 +815,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_add_epi64() { + const unsafe fn test_mm512_maskz_add_epi64() { let a = _mm512_setr_epi64(0, 1, -1, i64::MAX, i64::MIN, 100, -100, -32); let b = _mm512_set1_epi64(1); let r = _mm512_maskz_add_epi64(0, a, b); @@ -821,7 +826,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_add_epi64() { + const unsafe fn test_mm256_mask_add_epi64() { let a = _mm256_set_epi64x(1, -1, i64::MAX, i64::MIN); let b = _mm256_set1_epi64x(1); let r = _mm256_mask_add_epi64(a, 0, a, b); @@ -832,7 +837,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_add_epi64() { + const unsafe fn test_mm256_maskz_add_epi64() { let a = _mm256_set_epi64x(1, -1, i64::MAX, i64::MIN); let b = _mm256_set1_epi64x(1); let r = _mm256_maskz_add_epi64(0, a, b); @@ -843,7 +848,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_add_epi64() { + const unsafe fn test_mm_mask_add_epi64() { let a = _mm_set_epi64x(i64::MAX, i64::MIN); let b = _mm_set1_epi64x(1); let r = _mm_mask_add_epi64(a, 0, a, b); @@ -854,7 +859,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_add_epi64() { + const unsafe fn test_mm_maskz_add_epi64() { let a = _mm_set_epi64x(i64::MAX, i64::MIN); let b = _mm_set1_epi64x(1); let r = _mm_maskz_add_epi64(0, a, b); @@ -865,7 +870,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_add_pd() { + const unsafe fn test_mm512_add_pd() { let a = _mm512_setr_pd(0., 1., -1., f64::MAX, f64::MIN, 100., -100., -32.); let b = _mm512_set1_pd(1.); let r = _mm512_add_pd(a, b); @@ -874,7 +879,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_add_pd() { + const unsafe fn test_mm512_mask_add_pd() { let a = _mm512_setr_pd(0., 1., -1., f64::MAX, f64::MIN, 100., -100., -32.); let b = _mm512_set1_pd(1.); let r = 
_mm512_mask_add_pd(a, 0, a, b); @@ -885,7 +890,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_add_pd() { + const unsafe fn test_mm512_maskz_add_pd() { let a = _mm512_setr_pd(0., 1., -1., f64::MAX, f64::MIN, 100., -100., -32.); let b = _mm512_set1_pd(1.); let r = _mm512_maskz_add_pd(0, a, b); @@ -896,7 +901,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_add_pd() { + const unsafe fn test_mm256_mask_add_pd() { let a = _mm256_set_pd(1., -1., f64::MAX, f64::MIN); let b = _mm256_set1_pd(1.); let r = _mm256_mask_add_pd(a, 0, a, b); @@ -907,7 +912,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_add_pd() { + const unsafe fn test_mm256_maskz_add_pd() { let a = _mm256_set_pd(1., -1., f64::MAX, f64::MIN); let b = _mm256_set1_pd(1.); let r = _mm256_maskz_add_pd(0, a, b); @@ -918,7 +923,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_add_pd() { + const unsafe fn test_mm_mask_add_pd() { let a = _mm_set_pd(f64::MAX, f64::MIN); let b = _mm_set1_pd(1.); let r = _mm_mask_add_pd(a, 0, a, b); @@ -929,7 +934,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_add_pd() { + const unsafe fn test_mm_maskz_add_pd() { let a = _mm_set_pd(f64::MAX, f64::MIN); let b = _mm_set1_pd(1.); let r = _mm_maskz_add_pd(0, a, b); @@ -940,7 +945,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_sub_epi64() { + const unsafe fn test_mm512_sub_epi64() { let a = _mm512_setr_epi64(0, 1, -1, i64::MAX, i64::MIN, 100, -100, -32); let b = _mm512_set1_epi64(1); let r = _mm512_sub_epi64(a, b); @@ -949,7 +954,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_sub_epi64() { + const unsafe fn test_mm512_mask_sub_epi64() { let a = _mm512_setr_epi64(0, 1, -1, i64::MAX, i64::MIN, 100, -100, -32); let b = _mm512_set1_epi64(1); let r = _mm512_mask_sub_epi64(a, 0, a, b); @@ -960,7 +965,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_sub_epi64() { + const unsafe fn test_mm512_maskz_sub_epi64() { let a = _mm512_setr_epi64(0, 1, -1, i64::MAX, i64::MIN, 100, -100, -32); let b = _mm512_set1_epi64(1); let r = _mm512_maskz_sub_epi64(0, a, b); @@ -971,7 +976,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_sub_epi64() { + const unsafe fn test_mm256_mask_sub_epi64() { let a = _mm256_set_epi64x(1, -1, i64::MAX, i64::MIN); let b = _mm256_set1_epi64x(1); let r = _mm256_mask_sub_epi64(a, 0, a, b); @@ -982,7 +987,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_sub_epi64() { + const unsafe fn test_mm256_maskz_sub_epi64() { let a = _mm256_set_epi64x(1, -1, i64::MAX, i64::MIN); let b = _mm256_set1_epi64x(1); let r = _mm256_maskz_sub_epi64(0, a, b); @@ -993,7 +998,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_sub_epi64() { + const unsafe fn test_mm_mask_sub_epi64() { let a = _mm_set_epi64x(i64::MAX, i64::MIN); let b = _mm_set1_epi64x(1); let r = _mm_mask_sub_epi64(a, 0, a, b); @@ -1004,7 +1009,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_sub_epi64() { + const unsafe fn test_mm_maskz_sub_epi64() { let a = _mm_set_epi64x(i64::MAX, i64::MIN); let b = _mm_set1_epi64x(1); let r = _mm_maskz_sub_epi64(0, a, b); @@ -1015,7 +1020,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_sub_pd() { + const unsafe fn 
test_mm512_sub_pd() { let a = _mm512_setr_pd(0., 1., -1., f64::MAX, f64::MIN, 100., -100., -32.); let b = _mm512_set1_pd(1.); let r = _mm512_sub_pd(a, b); @@ -1024,7 +1029,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_sub_pd() { + const unsafe fn test_mm512_mask_sub_pd() { let a = _mm512_setr_pd(0., 1., -1., f64::MAX, f64::MIN, 100., -100., -32.); let b = _mm512_set1_pd(1.); let r = _mm512_mask_sub_pd(a, 0, a, b); @@ -1035,7 +1040,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_sub_pd() { + const unsafe fn test_mm512_maskz_sub_pd() { let a = _mm512_setr_pd(0., 1., -1., f64::MAX, f64::MIN, 100., -100., -32.); let b = _mm512_set1_pd(1.); let r = _mm512_maskz_sub_pd(0, a, b); @@ -1046,7 +1051,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_sub_pd() { + const unsafe fn test_mm256_mask_sub_pd() { let a = _mm256_set_pd(1., -1., f64::MAX, f64::MIN); let b = _mm256_set1_pd(1.); let r = _mm256_mask_sub_pd(a, 0, a, b); @@ -1057,7 +1062,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_sub_pd() { + const unsafe fn test_mm256_maskz_sub_pd() { let a = _mm256_set_pd(1., -1., f64::MAX, f64::MIN); let b = _mm256_set1_pd(1.); let r = _mm256_maskz_sub_pd(0, a, b); @@ -1068,7 +1073,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_sub_pd() { + const unsafe fn test_mm_mask_sub_pd() { let a = _mm_set_pd(f64::MAX, f64::MIN); let b = _mm_set1_pd(1.); let r = _mm_mask_sub_pd(a, 0, a, b); @@ -1079,7 +1084,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_sub_pd() { + const unsafe fn test_mm_maskz_sub_pd() { let a = _mm_set_pd(f64::MAX, f64::MIN); let b = _mm_set1_pd(1.); let r = _mm_maskz_sub_pd(0, a, b); @@ -1090,7 +1095,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mul_epi32() { + const unsafe fn test_mm512_mul_epi32() { let a = _mm512_set1_epi32(1); let b = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let r = _mm512_mul_epi32(a, b); @@ -1099,7 +1104,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_mul_epi32() { + const unsafe fn test_mm512_mask_mul_epi32() { let a = _mm512_set1_epi32(1); let b = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let r = _mm512_mask_mul_epi32(a, 0, a, b); @@ -1114,7 +1119,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_mul_epi32() { + const unsafe fn test_mm512_maskz_mul_epi32() { let a = _mm512_set1_epi32(1); let b = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let r = _mm512_maskz_mul_epi32(0, a, b); @@ -1125,7 +1130,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_mul_epi32() { + const unsafe fn test_mm256_mask_mul_epi32() { let a = _mm256_set1_epi32(1); let b = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm256_mask_mul_epi32(a, 0, a, b); @@ -1136,7 +1141,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_mul_epi32() { + const unsafe fn test_mm256_maskz_mul_epi32() { let a = _mm256_set1_epi32(1); let b = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm256_maskz_mul_epi32(0, a, b); @@ -1147,7 +1152,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_mul_epi32() { + const unsafe fn test_mm_mask_mul_epi32() { let a = _mm_set1_epi32(1); let b = _mm_set_epi32(1, 2, 3, 
4); let r = _mm_mask_mul_epi32(a, 0, a, b); @@ -1158,7 +1163,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_mul_epi32() { + const unsafe fn test_mm_maskz_mul_epi32() { let a = _mm_set1_epi32(1); let b = _mm_set_epi32(1, 2, 3, 4); let r = _mm_maskz_mul_epi32(0, a, b); @@ -1169,7 +1174,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mul_epu32() { + const unsafe fn test_mm512_mul_epu32() { let a = _mm512_set1_epi32(1); let b = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let r = _mm512_mul_epu32(a, b); @@ -1178,7 +1183,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_mul_epu32() { + const unsafe fn test_mm512_mask_mul_epu32() { let a = _mm512_set1_epi32(1); let b = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let r = _mm512_mask_mul_epu32(a, 0, a, b); @@ -1193,7 +1198,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_mul_epu32() { + const unsafe fn test_mm512_maskz_mul_epu32() { let a = _mm512_set1_epi32(1); let b = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); let r = _mm512_maskz_mul_epu32(0, a, b); @@ -1204,7 +1209,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_mul_epu32() { + const unsafe fn test_mm256_mask_mul_epu32() { let a = _mm256_set1_epi32(1); let b = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm256_mask_mul_epu32(a, 0, a, b); @@ -1215,7 +1220,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_mul_epu32() { + const unsafe fn test_mm256_maskz_mul_epu32() { let a = _mm256_set1_epi32(1); let b = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm256_maskz_mul_epu32(0, a, b); @@ -1226,7 +1231,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_mul_epu32() { + const unsafe fn test_mm_mask_mul_epu32() { let a = _mm_set1_epi32(1); let b = _mm_set_epi32(1, 2, 3, 4); let r = _mm_mask_mul_epu32(a, 0, a, b); @@ -1237,7 +1242,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_mul_epu32() { + const unsafe fn test_mm_maskz_mul_epu32() { let a = _mm_set1_epi32(1); let b = _mm_set_epi32(1, 2, 3, 4); let r = _mm_maskz_mul_epu32(0, a, b); @@ -1248,7 +1253,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mullox_epi64() { + const unsafe fn test_mm512_mullox_epi64() { let a = _mm512_setr_epi64(0, 1, i64::MAX, i64::MIN, i64::MAX, 100, -100, -32); let b = _mm512_set1_epi64(2); let r = _mm512_mullox_epi64(a, b); @@ -1257,7 +1262,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_mullox_epi64() { + const unsafe fn test_mm512_mask_mullox_epi64() { let a = _mm512_setr_epi64(0, 1, i64::MAX, i64::MIN, i64::MAX, 100, -100, -32); let b = _mm512_set1_epi64(2); let r = _mm512_mask_mullox_epi64(a, 0, a, b); @@ -1268,7 +1273,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mul_pd() { + const unsafe fn test_mm512_mul_pd() { let a = _mm512_setr_pd(0., 1., f64::MAX, f64::MIN, f64::MAX, f64::MIN, -100., -32.); let b = _mm512_set1_pd(2.); let r = _mm512_mul_pd(a, b); @@ -1281,7 +1286,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_mul_pd() { + const unsafe fn test_mm512_mask_mul_pd() { let a = _mm512_setr_pd(0., 1., f64::MAX, f64::MIN, f64::MAX, f64::MIN, -100., -32.); let b = _mm512_set1_pd(2.); let r = _mm512_mask_mul_pd(a, 0, a, b); @@ 
-1296,7 +1301,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_mul_pd() { + const unsafe fn test_mm512_maskz_mul_pd() { let a = _mm512_setr_pd(0., 1., f64::MAX, f64::MIN, f64::MAX, f64::MIN, -100., -32.); let b = _mm512_set1_pd(2.); let r = _mm512_maskz_mul_pd(0, a, b); @@ -1307,7 +1312,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_mul_pd() { + const unsafe fn test_mm256_mask_mul_pd() { let a = _mm256_set_pd(0., 1., f64::MAX, f64::MIN); let b = _mm256_set1_pd(2.); let r = _mm256_mask_mul_pd(a, 0, a, b); @@ -1318,7 +1323,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_mul_pd() { + const unsafe fn test_mm256_maskz_mul_pd() { let a = _mm256_set_pd(0., 1., f64::MAX, f64::MIN); let b = _mm256_set1_pd(2.); let r = _mm256_maskz_mul_pd(0, a, b); @@ -1329,7 +1334,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_mul_pd() { + const unsafe fn test_mm_mask_mul_pd() { let a = _mm_set_pd(f64::MAX, f64::MIN); let b = _mm_set1_pd(2.); let r = _mm_mask_mul_pd(a, 0, a, b); @@ -1340,7 +1345,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_mul_pd() { + const unsafe fn test_mm_maskz_mul_pd() { let a = _mm_set_pd(f64::MAX, f64::MIN); let b = _mm_set1_pd(2.); let r = _mm_maskz_mul_pd(0, a, b); @@ -1351,7 +1356,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_div_pd() { + const unsafe fn test_mm512_div_pd() { let a = _mm512_setr_pd(0., 1., f64::MAX, f64::MIN, f64::MAX, f64::MIN, -100., -32.); let b = _mm512_setr_pd(2., 2., 0., 0., 0., 0., 2., 2.); let r = _mm512_div_pd(a, b); @@ -1364,7 +1369,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_div_pd() { + const unsafe fn test_mm512_mask_div_pd() { let a = _mm512_setr_pd(0., 1., f64::MAX, f64::MIN, f64::MAX, f64::MIN, -100., -32.); let b = _mm512_setr_pd(2., 2., 0., 0., 0., 0., 2., 2.); let r = _mm512_mask_div_pd(a, 0, a, b); @@ -1379,7 +1384,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_div_pd() { + const unsafe fn test_mm512_maskz_div_pd() { let a = _mm512_setr_pd(0., 1., f64::MAX, f64::MIN, f64::MAX, f64::MIN, -100., -32.); let b = _mm512_setr_pd(2., 2., 0., 0., 0., 0., 2., 2.); let r = _mm512_maskz_div_pd(0, a, b); @@ -1390,7 +1395,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_div_pd() { + const unsafe fn test_mm256_mask_div_pd() { let a = _mm256_set_pd(0., 1., f64::MAX, f64::MIN); let b = _mm256_set_pd(2., 2., 0., 0.); let r = _mm256_mask_div_pd(a, 0, a, b); @@ -1401,7 +1406,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_div_pd() { + const unsafe fn test_mm256_maskz_div_pd() { let a = _mm256_set_pd(0., 1., f64::MAX, f64::MIN); let b = _mm256_set_pd(2., 2., 0., 0.); let r = _mm256_maskz_div_pd(0, a, b); @@ -1412,7 +1417,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_div_pd() { + const unsafe fn test_mm_mask_div_pd() { let a = _mm_set_pd(f64::MAX, f64::MIN); let b = _mm_set_pd(0., 0.); let r = _mm_mask_div_pd(a, 0, a, b); @@ -1423,7 +1428,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_div_pd() { + const unsafe fn test_mm_maskz_div_pd() { let a = _mm_set_pd(f64::MAX, f64::MIN); let b = _mm_set_pd(0., 0.); let r = _mm_maskz_div_pd(0, a, b); @@ -1434,7 +1439,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn 
test_mm512_max_epi64() { + const unsafe fn test_mm512_max_epi64() { let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm512_setr_epi64(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_max_epi64(a, b); @@ -1443,7 +1448,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_max_epi64() { + const unsafe fn test_mm512_mask_max_epi64() { let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm512_setr_epi64(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_mask_max_epi64(a, 0, a, b); @@ -1454,7 +1459,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_max_epi64() { + const unsafe fn test_mm512_maskz_max_epi64() { let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm512_setr_epi64(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_maskz_max_epi64(0, a, b); @@ -1465,7 +1470,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_max_epi64() { + const unsafe fn test_mm256_max_epi64() { let a = _mm256_set_epi64x(0, 1, 2, 3); let b = _mm256_set_epi64x(3, 2, 1, 0); let r = _mm256_max_epi64(a, b); @@ -1474,7 +1479,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_max_epi64() { + const unsafe fn test_mm256_mask_max_epi64() { let a = _mm256_set_epi64x(0, 1, 2, 3); let b = _mm256_set_epi64x(3, 2, 1, 0); let r = _mm256_mask_max_epi64(a, 0, a, b); @@ -1485,7 +1490,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_max_epi64() { + const unsafe fn test_mm256_maskz_max_epi64() { let a = _mm256_set_epi64x(0, 1, 2, 3); let b = _mm256_set_epi64x(3, 2, 1, 0); let r = _mm256_maskz_max_epi64(0, a, b); @@ -1496,7 +1501,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_max_epi64() { + const unsafe fn test_mm_max_epi64() { let a = _mm_set_epi64x(2, 3); let b = _mm_set_epi64x(3, 2); let r = _mm_max_epi64(a, b); @@ -1505,7 +1510,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_max_epi64() { + const unsafe fn test_mm_mask_max_epi64() { let a = _mm_set_epi64x(2, 3); let b = _mm_set_epi64x(3, 2); let r = _mm_mask_max_epi64(a, 0, a, b); @@ -1516,7 +1521,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_max_epi64() { + const unsafe fn test_mm_maskz_max_epi64() { let a = _mm_set_epi64x(2, 3); let b = _mm_set_epi64x(3, 2); let r = _mm_maskz_max_epi64(0, a, b); @@ -1602,7 +1607,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_max_epu64() { + const unsafe fn test_mm512_max_epu64() { let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm512_setr_epi64(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_max_epu64(a, b); @@ -1611,7 +1616,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_max_epu64() { + const unsafe fn test_mm512_mask_max_epu64() { let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm512_setr_epi64(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_mask_max_epu64(a, 0, a, b); @@ -1622,7 +1627,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_max_epu64() { + const unsafe fn test_mm512_maskz_max_epu64() { let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm512_setr_epi64(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_maskz_max_epu64(0, a, b); @@ -1633,7 +1638,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_max_epu64() { + const unsafe fn test_mm256_max_epu64() { let a = _mm256_set_epi64x(0, 1, 2, 3); let b = _mm256_set_epi64x(3, 2, 1, 0); let r 
= _mm256_max_epu64(a, b); @@ -1642,7 +1647,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_max_epu64() { + const unsafe fn test_mm256_mask_max_epu64() { let a = _mm256_set_epi64x(0, 1, 2, 3); let b = _mm256_set_epi64x(3, 2, 1, 0); let r = _mm256_mask_max_epu64(a, 0, a, b); @@ -1653,7 +1658,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_max_epu64() { + const unsafe fn test_mm256_maskz_max_epu64() { let a = _mm256_set_epi64x(0, 1, 2, 3); let b = _mm256_set_epi64x(3, 2, 1, 0); let r = _mm256_maskz_max_epu64(0, a, b); @@ -1664,7 +1669,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_max_epu64() { + const unsafe fn test_mm_max_epu64() { let a = _mm_set_epi64x(2, 3); let b = _mm_set_epi64x(3, 2); let r = _mm_max_epu64(a, b); @@ -1673,7 +1678,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_max_epu64() { + const unsafe fn test_mm_mask_max_epu64() { let a = _mm_set_epi64x(2, 3); let b = _mm_set_epi64x(3, 2); let r = _mm_mask_max_epu64(a, 0, a, b); @@ -1684,7 +1689,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_max_epu64() { + const unsafe fn test_mm_maskz_max_epu64() { let a = _mm_set_epi64x(2, 3); let b = _mm_set_epi64x(3, 2); let r = _mm_maskz_max_epu64(0, a, b); @@ -1695,7 +1700,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_min_epi64() { + const unsafe fn test_mm512_min_epi64() { let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm512_setr_epi64(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_min_epi64(a, b); @@ -1704,7 +1709,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_min_epi64() { + const unsafe fn test_mm512_mask_min_epi64() { let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm512_setr_epi64(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_mask_min_epi64(a, 0, a, b); @@ -1715,7 +1720,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_min_epi64() { + const unsafe fn test_mm512_maskz_min_epi64() { let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm512_setr_epi64(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_maskz_min_epi64(0, a, b); @@ -1726,7 +1731,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_min_epi64() { + const unsafe fn test_mm256_min_epi64() { let a = _mm256_set_epi64x(0, 1, 2, 3); let b = _mm256_set_epi64x(3, 2, 1, 0); let r = _mm256_min_epi64(a, b); @@ -1735,7 +1740,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_min_epi64() { + const unsafe fn test_mm256_mask_min_epi64() { let a = _mm256_set_epi64x(0, 1, 2, 3); let b = _mm256_set_epi64x(3, 2, 1, 0); let r = _mm256_mask_min_epi64(a, 0, a, b); @@ -1746,7 +1751,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_min_epi64() { + const unsafe fn test_mm256_maskz_min_epi64() { let a = _mm256_set_epi64x(0, 1, 2, 3); let b = _mm256_set_epi64x(3, 2, 1, 0); let r = _mm256_maskz_min_epi64(0, a, b); @@ -1757,7 +1762,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_min_epi64() { + const unsafe fn test_mm_min_epi64() { let a = _mm_set_epi64x(0, 1); let b = _mm_set_epi64x(3, 2); let r = _mm_min_epi64(a, b); @@ -1771,7 +1776,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_min_epi64() { + const unsafe fn test_mm_mask_min_epi64() { let a = _mm_set_epi64x(0, 1); let b = 
_mm_set_epi64x(3, 2); let r = _mm_mask_min_epi64(a, 0, a, b); @@ -1782,7 +1787,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_min_epi64() { + const unsafe fn test_mm_maskz_min_epi64() { let a = _mm_set_epi64x(0, 1); let b = _mm_set_epi64x(3, 2); let r = _mm_maskz_min_epi64(0, a, b); @@ -1868,7 +1873,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_min_epu64() { + const unsafe fn test_mm512_min_epu64() { let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm512_setr_epi64(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_min_epu64(a, b); @@ -1877,7 +1882,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_min_epu64() { + const unsafe fn test_mm512_mask_min_epu64() { let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm512_setr_epi64(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_mask_min_epu64(a, 0, a, b); @@ -1888,7 +1893,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_min_epu64() { + const unsafe fn test_mm512_maskz_min_epu64() { let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7); let b = _mm512_setr_epi64(7, 6, 5, 4, 3, 2, 1, 0); let r = _mm512_maskz_min_epu64(0, a, b); @@ -1899,7 +1904,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_min_epu64() { + const unsafe fn test_mm256_min_epu64() { let a = _mm256_set_epi64x(0, 1, 2, 3); let b = _mm256_set_epi64x(3, 2, 1, 0); let r = _mm256_min_epu64(a, b); @@ -1908,7 +1913,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_min_epu64() { + const unsafe fn test_mm256_mask_min_epu64() { let a = _mm256_set_epi64x(0, 1, 2, 3); let b = _mm256_set_epi64x(3, 2, 1, 0); let r = _mm256_mask_min_epu64(a, 0, a, b); @@ -1919,7 +1924,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_min_epu64() { + const unsafe fn test_mm256_maskz_min_epu64() { let a = _mm256_set_epi64x(0, 1, 2, 3); let b = _mm256_set_epi64x(3, 2, 1, 0); let r = _mm256_maskz_min_epu64(0, a, b); @@ -1930,7 +1935,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_min_epu64() { + const unsafe fn test_mm_min_epu64() { let a = _mm_set_epi64x(0, 1); let b = _mm_set_epi64x(1, 0); let r = _mm_min_epu64(a, b); @@ -1939,7 +1944,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_min_epu64() { + const unsafe fn test_mm_mask_min_epu64() { let a = _mm_set_epi64x(0, 1); let b = _mm_set_epi64x(1, 0); let r = _mm_mask_min_epu64(a, 0, a, b); @@ -1950,7 +1955,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_min_epu64() { + const unsafe fn test_mm_maskz_min_epu64() { let a = _mm_set_epi64x(0, 1); let b = _mm_set_epi64x(1, 0); let r = _mm_maskz_min_epu64(0, a, b); @@ -2029,7 +2034,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_fmadd_pd() { + const unsafe fn test_mm512_fmadd_pd() { let a = _mm512_setr_pd(1., 1., 1., 1., 1., 1., 1., 1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_setr_pd(1., 1., 1., 1., 1., 1., 1., 1.); @@ -2039,7 +2044,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_fmadd_pd() { + const unsafe fn test_mm512_mask_fmadd_pd() { let a = _mm512_setr_pd(1., 1., 1., 1., 1., 1., 1., 1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_setr_pd(1., 1., 1., 1., 1., 1., 1., 1.); @@ -2051,7 +2056,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn 
test_mm512_maskz_fmadd_pd() { + const unsafe fn test_mm512_maskz_fmadd_pd() { let a = _mm512_setr_pd(1., 1., 1., 1., 1., 1., 1., 1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_setr_pd(1., 1., 1., 1., 1., 1., 1., 1.); @@ -2063,7 +2068,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask3_fmadd_pd() { + const unsafe fn test_mm512_mask3_fmadd_pd() { let a = _mm512_setr_pd(1., 1., 1., 1., 1., 1., 1., 1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_setr_pd(1., 1., 1., 1., 2., 2., 2., 2.); @@ -2075,7 +2080,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_fmadd_pd() { + const unsafe fn test_mm256_mask_fmadd_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set_pd(0., 1., 2., 3.); let c = _mm256_set1_pd(1.); @@ -2087,7 +2092,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_fmadd_pd() { + const unsafe fn test_mm256_maskz_fmadd_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set_pd(0., 1., 2., 3.); let c = _mm256_set1_pd(1.); @@ -2099,7 +2104,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask3_fmadd_pd() { + const unsafe fn test_mm256_mask3_fmadd_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set_pd(0., 1., 2., 3.); let c = _mm256_set1_pd(1.); @@ -2111,7 +2116,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_fmadd_pd() { + const unsafe fn test_mm_mask_fmadd_pd() { let a = _mm_set1_pd(1.); let b = _mm_set_pd(0., 1.); let c = _mm_set1_pd(1.); @@ -2123,7 +2128,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_fmadd_pd() { + const unsafe fn test_mm_maskz_fmadd_pd() { let a = _mm_set1_pd(1.); let b = _mm_set_pd(0., 1.); let c = _mm_set1_pd(1.); @@ -2135,7 +2140,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask3_fmadd_pd() { + const unsafe fn test_mm_mask3_fmadd_pd() { let a = _mm_set1_pd(1.); let b = _mm_set_pd(0., 1.); let c = _mm_set1_pd(1.); @@ -2147,7 +2152,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_fmsub_pd() { + const unsafe fn test_mm512_fmsub_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_set1_pd(1.); @@ -2157,7 +2162,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_fmsub_pd() { + const unsafe fn test_mm512_mask_fmsub_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_set1_pd(1.); @@ -2169,7 +2174,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_fmsub_pd() { + const unsafe fn test_mm512_maskz_fmsub_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_set1_pd(1.); @@ -2181,7 +2186,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask3_fmsub_pd() { + const unsafe fn test_mm512_mask3_fmsub_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_setr_pd(1., 1., 1., 1., 2., 2., 2., 2.); @@ -2193,7 +2198,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_fmsub_pd() { + const unsafe fn test_mm256_mask_fmsub_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set_pd(0., 1., 2., 3.); let c = _mm256_set1_pd(1.); @@ -2205,7 +2210,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_fmsub_pd() 
{ + const unsafe fn test_mm256_maskz_fmsub_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set_pd(0., 1., 2., 3.); let c = _mm256_set1_pd(1.); @@ -2217,7 +2222,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask3_fmsub_pd() { + const unsafe fn test_mm256_mask3_fmsub_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set_pd(0., 1., 2., 3.); let c = _mm256_set1_pd(1.); @@ -2229,7 +2234,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_fmsub_pd() { + const unsafe fn test_mm_mask_fmsub_pd() { let a = _mm_set1_pd(1.); let b = _mm_set_pd(0., 1.); let c = _mm_set1_pd(1.); @@ -2241,7 +2246,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_fmsub_pd() { + const unsafe fn test_mm_maskz_fmsub_pd() { let a = _mm_set1_pd(1.); let b = _mm_set_pd(0., 1.); let c = _mm_set1_pd(1.); @@ -2253,7 +2258,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask3_fmsub_pd() { + const unsafe fn test_mm_mask3_fmsub_pd() { let a = _mm_set1_pd(1.); let b = _mm_set_pd(0., 1.); let c = _mm_set1_pd(1.); @@ -2265,7 +2270,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_fmaddsub_pd() { + const unsafe fn test_mm512_fmaddsub_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_set1_pd(1.); @@ -2275,7 +2280,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_fmaddsub_pd() { + const unsafe fn test_mm512_mask_fmaddsub_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_set1_pd(1.); @@ -2287,7 +2292,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_fmaddsub_pd() { + const unsafe fn test_mm512_maskz_fmaddsub_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_set1_pd(1.); @@ -2299,7 +2304,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask3_fmaddsub_pd() { + const unsafe fn test_mm512_mask3_fmaddsub_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_setr_pd(1., 1., 1., 1., 2., 2., 2., 2.); @@ -2311,7 +2316,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_fmaddsub_pd() { + const unsafe fn test_mm256_mask_fmaddsub_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set_pd(0., 1., 2., 3.); let c = _mm256_set1_pd(1.); @@ -2323,7 +2328,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_fmaddsub_pd() { + const unsafe fn test_mm256_maskz_fmaddsub_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set_pd(0., 1., 2., 3.); let c = _mm256_set1_pd(1.); @@ -2335,7 +2340,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask3_fmaddsub_pd() { + const unsafe fn test_mm256_mask3_fmaddsub_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set_pd(0., 1., 2., 3.); let c = _mm256_set1_pd(1.); @@ -2347,7 +2352,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_fmaddsub_pd() { + const unsafe fn test_mm_mask_fmaddsub_pd() { let a = _mm_set1_pd(1.); let b = _mm_set_pd(0., 1.); let c = _mm_set1_pd(1.); @@ -2359,7 +2364,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_fmaddsub_pd() { + const unsafe fn test_mm_maskz_fmaddsub_pd() { let a = _mm_set1_pd(1.); let b = _mm_set_pd(0., 1.); let c = _mm_set1_pd(1.); @@ -2371,7 
+2376,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask3_fmaddsub_pd() { + const unsafe fn test_mm_mask3_fmaddsub_pd() { let a = _mm_set1_pd(1.); let b = _mm_set_pd(0., 1.); let c = _mm_set1_pd(1.); @@ -2383,7 +2388,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_fmsubadd_pd() { + const unsafe fn test_mm512_fmsubadd_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_set1_pd(1.); @@ -2393,7 +2398,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_fmsubadd_pd() { + const unsafe fn test_mm512_mask_fmsubadd_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_set1_pd(1.); @@ -2405,7 +2410,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_fmsubadd_pd() { + const unsafe fn test_mm512_maskz_fmsubadd_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_set1_pd(1.); @@ -2417,7 +2422,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask3_fmsubadd_pd() { + const unsafe fn test_mm512_mask3_fmsubadd_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_setr_pd(1., 1., 1., 1., 2., 2., 2., 2.); @@ -2429,7 +2434,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_fmsubadd_pd() { + const unsafe fn test_mm256_mask_fmsubadd_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set_pd(0., 1., 2., 3.); let c = _mm256_set1_pd(1.); @@ -2441,7 +2446,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_fmsubadd_pd() { + const unsafe fn test_mm256_maskz_fmsubadd_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set_pd(0., 1., 2., 3.); let c = _mm256_set1_pd(1.); @@ -2453,7 +2458,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask3_fmsubadd_pd() { + const unsafe fn test_mm256_mask3_fmsubadd_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set_pd(0., 1., 2., 3.); let c = _mm256_set1_pd(1.); @@ -2465,7 +2470,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_fmsubadd_pd() { + const unsafe fn test_mm_mask_fmsubadd_pd() { let a = _mm_set1_pd(1.); let b = _mm_set_pd(0., 1.); let c = _mm_set1_pd(1.); @@ -2477,7 +2482,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_fmsubadd_pd() { + const unsafe fn test_mm_maskz_fmsubadd_pd() { let a = _mm_set1_pd(1.); let b = _mm_set_pd(0., 1.); let c = _mm_set1_pd(1.); @@ -2489,7 +2494,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask3_fmsubadd_pd() { + const unsafe fn test_mm_mask3_fmsubadd_pd() { let a = _mm_set1_pd(1.); let b = _mm_set_pd(0., 1.); let c = _mm_set1_pd(1.); @@ -2501,7 +2506,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_fnmadd_pd() { + const unsafe fn test_mm512_fnmadd_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_set1_pd(1.); @@ -2511,7 +2516,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_fnmadd_pd() { + const unsafe fn test_mm512_mask_fnmadd_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_set1_pd(1.); @@ -2523,7 +2528,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_fnmadd_pd() { + const 
unsafe fn test_mm512_maskz_fnmadd_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_set1_pd(1.); @@ -2535,7 +2540,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask3_fnmadd_pd() { + const unsafe fn test_mm512_mask3_fnmadd_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_setr_pd(1., 1., 1., 1., 2., 2., 2., 2.); @@ -2547,7 +2552,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_fnmadd_pd() { + const unsafe fn test_mm256_mask_fnmadd_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set_pd(0., 1., 2., 3.); let c = _mm256_set1_pd(1.); @@ -2559,7 +2564,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_fnmadd_pd() { + const unsafe fn test_mm256_maskz_fnmadd_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set_pd(0., 1., 2., 3.); let c = _mm256_set1_pd(1.); @@ -2571,7 +2576,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask3_fnmadd_pd() { + const unsafe fn test_mm256_mask3_fnmadd_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set_pd(0., 1., 2., 3.); let c = _mm256_set1_pd(1.); @@ -2583,7 +2588,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_fnmadd_pd() { + const unsafe fn test_mm_mask_fnmadd_pd() { let a = _mm_set1_pd(1.); let b = _mm_set_pd(0., 1.); let c = _mm_set1_pd(1.); @@ -2595,7 +2600,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_fnmadd_pd() { + const unsafe fn test_mm_maskz_fnmadd_pd() { let a = _mm_set1_pd(1.); let b = _mm_set_pd(0., 1.); let c = _mm_set1_pd(1.); @@ -2607,7 +2612,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask3_fnmadd_pd() { + const unsafe fn test_mm_mask3_fnmadd_pd() { let a = _mm_set1_pd(1.); let b = _mm_set_pd(0., 1.); let c = _mm_set1_pd(1.); @@ -2619,7 +2624,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_fnmsub_pd() { + const unsafe fn test_mm512_fnmsub_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_set1_pd(1.); @@ -2629,7 +2634,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_fnmsub_pd() { + const unsafe fn test_mm512_mask_fnmsub_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_set1_pd(1.); @@ -2641,7 +2646,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_fnmsub_pd() { + const unsafe fn test_mm512_maskz_fnmsub_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_set1_pd(1.); @@ -2653,7 +2658,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask3_fnmsub_pd() { + const unsafe fn test_mm512_mask3_fnmsub_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let c = _mm512_setr_pd(1., 1., 1., 1., 2., 2., 2., 2.); @@ -2665,7 +2670,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_fnmsub_pd() { + const unsafe fn test_mm256_mask_fnmsub_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set_pd(0., 1., 2., 3.); let c = _mm256_set1_pd(1.); @@ -2677,7 +2682,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_fnmsub_pd() { + const unsafe fn test_mm256_maskz_fnmsub_pd() { let a = _mm256_set1_pd(1.); let b = 
_mm256_set_pd(0., 1., 2., 3.); let c = _mm256_set1_pd(1.); @@ -2689,7 +2694,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask3_fnmsub_pd() { + const unsafe fn test_mm256_mask3_fnmsub_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set_pd(0., 1., 2., 3.); let c = _mm256_set1_pd(1.); @@ -2701,7 +2706,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_fnmsub_pd() { + const unsafe fn test_mm_mask_fnmsub_pd() { let a = _mm_set1_pd(1.); let b = _mm_set_pd(0., 1.); let c = _mm_set1_pd(1.); @@ -2713,7 +2718,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_fnmsub_pd() { + const unsafe fn test_mm_maskz_fnmsub_pd() { let a = _mm_set1_pd(1.); let b = _mm_set_pd(0., 1.); let c = _mm_set1_pd(1.); @@ -2725,7 +2730,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask3_fnmsub_pd() { + const unsafe fn test_mm_mask3_fnmsub_pd() { let a = _mm_set1_pd(1.); let b = _mm_set_pd(0., 1.); let c = _mm_set1_pd(1.); @@ -3764,7 +3769,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepi8_epi64() { + const unsafe fn test_mm512_cvtepi8_epi64() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepi8_epi64(a); let e = _mm512_set_epi64(8, 9, 10, 11, 12, 13, 14, 15); @@ -3772,7 +3777,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepi8_epi64() { + const unsafe fn test_mm512_mask_cvtepi8_epi64() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm512_set1_epi64(-1); let r = _mm512_mask_cvtepi8_epi64(src, 0, a); @@ -3783,7 +3788,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_cvtepi8_epi64() { + const unsafe fn test_mm512_maskz_cvtepi8_epi64() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_maskz_cvtepi8_epi64(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -3793,7 +3798,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cvtepi8_epi64() { + const unsafe fn test_mm256_mask_cvtepi8_epi64() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm256_set1_epi64x(-1); let r = _mm256_mask_cvtepi8_epi64(src, 0, a); @@ -3804,7 +3809,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_cvtepi8_epi64() { + const unsafe fn test_mm256_maskz_cvtepi8_epi64() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm256_maskz_cvtepi8_epi64(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -3814,7 +3819,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cvtepi8_epi64() { + const unsafe fn test_mm_mask_cvtepi8_epi64() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm_set1_epi64x(-1); let r = _mm_mask_cvtepi8_epi64(src, 0, a); @@ -3825,7 +3830,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_cvtepi8_epi64() { + const unsafe fn test_mm_maskz_cvtepi8_epi64() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm_maskz_cvtepi8_epi64(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -3835,7 +3840,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepu8_epi64() { + const unsafe fn test_mm512_cvtepu8_epi64() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 
11, 12, 13, 14, 15); let r = _mm512_cvtepu8_epi64(a); let e = _mm512_set_epi64(8, 9, 10, 11, 12, 13, 14, 15); @@ -3843,7 +3848,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepu8_epi64() { + const unsafe fn test_mm512_mask_cvtepu8_epi64() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm512_set1_epi64(-1); let r = _mm512_mask_cvtepu8_epi64(src, 0, a); @@ -3854,7 +3859,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_cvtepu8_epi64() { + const unsafe fn test_mm512_maskz_cvtepu8_epi64() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_maskz_cvtepu8_epi64(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -3864,7 +3869,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cvtepu8_epi64() { + const unsafe fn test_mm256_mask_cvtepu8_epi64() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm256_set1_epi64x(-1); let r = _mm256_mask_cvtepu8_epi64(src, 0, a); @@ -3875,7 +3880,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_cvtepu8_epi64() { + const unsafe fn test_mm256_maskz_cvtepu8_epi64() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm256_maskz_cvtepu8_epi64(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -3885,7 +3890,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cvtepu8_epi64() { + const unsafe fn test_mm_mask_cvtepu8_epi64() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm_set1_epi64x(-1); let r = _mm_mask_cvtepu8_epi64(src, 0, a); @@ -3896,7 +3901,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_cvtepu8_epi64() { + const unsafe fn test_mm_maskz_cvtepu8_epi64() { let a = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm_maskz_cvtepu8_epi64(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -3906,7 +3911,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepi16_epi64() { + const unsafe fn test_mm512_cvtepi16_epi64() { let a = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepi16_epi64(a); let e = _mm512_set_epi64(8, 9, 10, 11, 12, 13, 14, 15); @@ -3914,7 +3919,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepi16_epi64() { + const unsafe fn test_mm512_mask_cvtepi16_epi64() { let a = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); let src = _mm512_set1_epi64(-1); let r = _mm512_mask_cvtepi16_epi64(src, 0, a); @@ -3925,7 +3930,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_cvtepi16_epi64() { + const unsafe fn test_mm512_maskz_cvtepi16_epi64() { let a = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_maskz_cvtepi16_epi64(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -3935,7 +3940,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cvtepi16_epi64() { + const unsafe fn test_mm256_mask_cvtepi16_epi64() { let a = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); let src = _mm256_set1_epi64x(-1); let r = _mm256_mask_cvtepi16_epi64(src, 0, a); @@ -3946,7 +3951,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_cvtepi16_epi64() { + const unsafe fn test_mm256_maskz_cvtepi16_epi64() { let a = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); let r = 
_mm256_maskz_cvtepi16_epi64(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -3956,7 +3961,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cvtepi16_epi64() { + const unsafe fn test_mm_mask_cvtepi16_epi64() { let a = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); let src = _mm_set1_epi64x(-1); let r = _mm_mask_cvtepi16_epi64(src, 0, a); @@ -3967,7 +3972,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_cvtepi16_epi64() { + const unsafe fn test_mm_maskz_cvtepi16_epi64() { let a = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm_maskz_cvtepi16_epi64(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -3977,7 +3982,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepu16_epi64() { + const unsafe fn test_mm512_cvtepu16_epi64() { let a = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepu16_epi64(a); let e = _mm512_set_epi64(8, 9, 10, 11, 12, 13, 14, 15); @@ -3985,7 +3990,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepu16_epi64() { + const unsafe fn test_mm512_mask_cvtepu16_epi64() { let a = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); let src = _mm512_set1_epi64(-1); let r = _mm512_mask_cvtepu16_epi64(src, 0, a); @@ -3996,7 +4001,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_cvtepu16_epi64() { + const unsafe fn test_mm512_maskz_cvtepu16_epi64() { let a = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_maskz_cvtepu16_epi64(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -4006,7 +4011,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cvtepu16_epi64() { + const unsafe fn test_mm256_mask_cvtepu16_epi64() { let a = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); let src = _mm256_set1_epi64x(-1); let r = _mm256_mask_cvtepu16_epi64(src, 0, a); @@ -4017,7 +4022,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_cvtepu16_epi64() { + const unsafe fn test_mm256_maskz_cvtepu16_epi64() { let a = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm256_maskz_cvtepu16_epi64(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -4027,7 +4032,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cvtepu16_epi64() { + const unsafe fn test_mm_mask_cvtepu16_epi64() { let a = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); let src = _mm_set1_epi64x(-1); let r = _mm_mask_cvtepu16_epi64(src, 0, a); @@ -4038,7 +4043,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_cvtepu16_epi64() { + const unsafe fn test_mm_maskz_cvtepu16_epi64() { let a = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm_maskz_cvtepu16_epi64(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -4048,7 +4053,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepi32_epi64() { + const unsafe fn test_mm512_cvtepi32_epi64() { let a = _mm256_set_epi32(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepi32_epi64(a); let e = _mm512_set_epi64(8, 9, 10, 11, 12, 13, 14, 15); @@ -4056,7 +4061,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepi32_epi64() { + const unsafe fn test_mm512_mask_cvtepi32_epi64() { let a = _mm256_set_epi32(8, 9, 10, 11, 12, 13, 14, 15); let src = _mm512_set1_epi64(-1); let r = _mm512_mask_cvtepi32_epi64(src, 0, a); @@ -4067,7 +4072,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn 
test_mm512_maskz_cvtepi32_epi64() { + const unsafe fn test_mm512_maskz_cvtepi32_epi64() { let a = _mm256_set_epi32(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_maskz_cvtepi32_epi64(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -4077,7 +4082,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cvtepi32_epi64() { + const unsafe fn test_mm256_mask_cvtepi32_epi64() { let a = _mm_set_epi32(8, 9, 10, 11); let src = _mm256_set1_epi64x(-1); let r = _mm256_mask_cvtepi32_epi64(src, 0, a); @@ -4088,7 +4093,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_cvtepi32_epi64() { + const unsafe fn test_mm256_maskz_cvtepi32_epi64() { let a = _mm_set_epi32(8, 9, 10, 11); let r = _mm256_maskz_cvtepi32_epi64(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -4098,7 +4103,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cvtepi32_epi64() { + const unsafe fn test_mm_mask_cvtepi32_epi64() { let a = _mm_set_epi32(8, 9, 10, 11); let src = _mm_set1_epi64x(0); let r = _mm_mask_cvtepi32_epi64(src, 0, a); @@ -4109,7 +4114,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_cvtepi32_epi64() { + const unsafe fn test_mm_maskz_cvtepi32_epi64() { let a = _mm_set_epi32(8, 9, 10, 11); let r = _mm_maskz_cvtepi32_epi64(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -4119,7 +4124,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepu32_epi64() { + const unsafe fn test_mm512_cvtepu32_epi64() { let a = _mm256_set_epi32(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepu32_epi64(a); let e = _mm512_set_epi64(8, 9, 10, 11, 12, 13, 14, 15); @@ -4127,7 +4132,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepu32_epi64() { + const unsafe fn test_mm512_mask_cvtepu32_epi64() { let a = _mm256_set_epi32(8, 9, 10, 11, 12, 13, 14, 15); let src = _mm512_set1_epi64(-1); let r = _mm512_mask_cvtepu32_epi64(src, 0, a); @@ -4138,7 +4143,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_cvtepu32_epi64() { + const unsafe fn test_mm512_maskz_cvtepu32_epi64() { let a = _mm256_set_epi32(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_maskz_cvtepu32_epi64(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -4148,7 +4153,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cvtepu32_epi64() { + const unsafe fn test_mm256_mask_cvtepu32_epi64() { let a = _mm_set_epi32(12, 13, 14, 15); let src = _mm256_set1_epi64x(-1); let r = _mm256_mask_cvtepu32_epi64(src, 0, a); @@ -4159,7 +4164,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_cvtepu32_epi64() { + const unsafe fn test_mm256_maskz_cvtepu32_epi64() { let a = _mm_set_epi32(12, 13, 14, 15); let r = _mm256_maskz_cvtepu32_epi64(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -4169,7 +4174,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cvtepu32_epi64() { + const unsafe fn test_mm_mask_cvtepu32_epi64() { let a = _mm_set_epi32(12, 13, 14, 15); let src = _mm_set1_epi64x(-1); let r = _mm_mask_cvtepu32_epi64(src, 0, a); @@ -4180,7 +4185,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_cvtepu32_epi64() { + const unsafe fn test_mm_maskz_cvtepu32_epi64() { let a = _mm_set_epi32(12, 13, 14, 15); let r = _mm_maskz_cvtepu32_epi64(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -4190,7 +4195,7 @@ mod tests { } 
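// NOTE (sketch, not part of the diff): the `mask_`/`maskz_` conversion tests above all
// exercise the same AVX-512 write-masking rule. A scalar model of that rule, using
// illustrative helper names that do not exist in this patch:
fn mask_convert_model(src: [i64; 8], k: u8, a: [i32; 8]) -> [i64; 8] {
    // merge-masking: lane i takes the converted value when bit i of `k` is set,
    // otherwise it keeps the corresponding lane of `src`
    let mut r = src;
    for i in 0..8 {
        if (k >> i) & 1 == 1 {
            r[i] = a[i] as i64; // e.g. the sign extension performed by _mm512_cvtepi32_epi64
        }
    }
    r
}

fn maskz_convert_model(k: u8, a: [i32; 8]) -> [i64; 8] {
    // zero-masking is merge-masking with an all-zero source, which is why the tests
    // above expect an all-zero vector whenever the mask argument is 0
    mask_convert_model([0; 8], k, a)
}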
#[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepi32_pd() { + const unsafe fn test_mm512_cvtepi32_pd() { let a = _mm256_set_epi32(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepi32_pd(a); let e = _mm512_set_pd(8., 9., 10., 11., 12., 13., 14., 15.); @@ -4198,7 +4203,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepi32_pd() { + const unsafe fn test_mm512_mask_cvtepi32_pd() { let a = _mm256_set_epi32(8, 9, 10, 11, 12, 13, 14, 15); let src = _mm512_set1_pd(-1.); let r = _mm512_mask_cvtepi32_pd(src, 0, a); @@ -4209,7 +4214,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_cvtepi32_pd() { + const unsafe fn test_mm512_maskz_cvtepi32_pd() { let a = _mm256_set_epi32(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_maskz_cvtepi32_pd(0, a); assert_eq_m512d(r, _mm512_setzero_pd()); @@ -4219,7 +4224,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cvtepi32_pd() { + const unsafe fn test_mm256_mask_cvtepi32_pd() { let a = _mm_set_epi32(12, 13, 14, 15); let src = _mm256_set1_pd(-1.); let r = _mm256_mask_cvtepi32_pd(src, 0, a); @@ -4230,7 +4235,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_cvtepi32_pd() { + const unsafe fn test_mm256_maskz_cvtepi32_pd() { let a = _mm_set_epi32(12, 13, 14, 15); let r = _mm256_maskz_cvtepi32_pd(0, a); assert_eq_m256d(r, _mm256_setzero_pd()); @@ -4240,7 +4245,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cvtepi32_pd() { + const unsafe fn test_mm_mask_cvtepi32_pd() { let a = _mm_set_epi32(12, 13, 14, 15); let src = _mm_set1_pd(-1.); let r = _mm_mask_cvtepi32_pd(src, 0, a); @@ -4251,7 +4256,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_cvtepi32_pd() { + const unsafe fn test_mm_maskz_cvtepi32_pd() { let a = _mm_set_epi32(12, 13, 14, 15); let r = _mm_maskz_cvtepi32_pd(0, a); assert_eq_m128d(r, _mm_setzero_pd()); @@ -4261,7 +4266,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepu32_pd() { + const unsafe fn test_mm512_cvtepu32_pd() { let a = _mm256_set_epi32(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepu32_pd(a); let e = _mm512_set_pd(8., 9., 10., 11., 12., 13., 14., 15.); @@ -4269,7 +4274,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepu32_pd() { + const unsafe fn test_mm512_mask_cvtepu32_pd() { let a = _mm256_set_epi32(8, 9, 10, 11, 12, 13, 14, 15); let src = _mm512_set1_pd(-1.); let r = _mm512_mask_cvtepu32_pd(src, 0, a); @@ -4280,7 +4285,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_cvtepu32_pd() { + const unsafe fn test_mm512_maskz_cvtepu32_pd() { let a = _mm256_set_epi32(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_maskz_cvtepu32_pd(0, a); assert_eq_m512d(r, _mm512_setzero_pd()); @@ -4290,7 +4295,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cvtepu32_pd() { + const unsafe fn test_mm256_cvtepu32_pd() { let a = _mm_set_epi32(12, 13, 14, 15); let r = _mm256_cvtepu32_pd(a); let e = _mm256_set_pd(12., 13., 14., 15.); @@ -4298,7 +4303,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cvtepu32_pd() { + const unsafe fn test_mm256_mask_cvtepu32_pd() { let a = _mm_set_epi32(12, 13, 14, 15); let src = _mm256_set1_pd(-1.); let r = _mm256_mask_cvtepu32_pd(src, 0, a); @@ -4309,7 +4314,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn 
test_mm256_maskz_cvtepu32_pd() { + const unsafe fn test_mm256_maskz_cvtepu32_pd() { let a = _mm_set_epi32(12, 13, 14, 15); let r = _mm256_maskz_cvtepu32_pd(0, a); assert_eq_m256d(r, _mm256_setzero_pd()); @@ -4319,7 +4324,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cvtepu32_pd() { + const unsafe fn test_mm_cvtepu32_pd() { let a = _mm_set_epi32(12, 13, 14, 15); let r = _mm_cvtepu32_pd(a); let e = _mm_set_pd(14., 15.); @@ -4327,7 +4332,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cvtepu32_pd() { + const unsafe fn test_mm_mask_cvtepu32_pd() { let a = _mm_set_epi32(12, 13, 14, 15); let src = _mm_set1_pd(-1.); let r = _mm_mask_cvtepu32_pd(src, 0, a); @@ -4338,7 +4343,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_cvtepu32_pd() { + const unsafe fn test_mm_maskz_cvtepu32_pd() { let a = _mm_set_epi32(12, 13, 14, 15); let r = _mm_maskz_cvtepu32_pd(0, a); assert_eq_m128d(r, _mm_setzero_pd()); @@ -4348,7 +4353,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepi32lo_pd() { + const unsafe fn test_mm512_cvtepi32lo_pd() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepi32lo_pd(a); let e = _mm512_set_pd(8., 9., 10., 11., 12., 13., 14., 15.); @@ -4356,7 +4361,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepi32lo_pd() { + const unsafe fn test_mm512_mask_cvtepi32lo_pd() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm512_set1_pd(-1.); let r = _mm512_mask_cvtepi32lo_pd(src, 0, a); @@ -4367,7 +4372,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepu32lo_pd() { + const unsafe fn test_mm512_cvtepu32lo_pd() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepu32lo_pd(a); let e = _mm512_set_pd(8., 9., 10., 11., 12., 13., 14., 15.); @@ -4375,7 +4380,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepu32lo_pd() { + const unsafe fn test_mm512_mask_cvtepu32lo_pd() { let a = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); let src = _mm512_set1_pd(-1.); let r = _mm512_mask_cvtepu32lo_pd(src, 0, a); @@ -4386,7 +4391,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepi64_epi32() { + const unsafe fn test_mm512_cvtepi64_epi32() { let a = _mm512_set_epi64(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepi64_epi32(a); let e = _mm256_set_epi32(8, 9, 10, 11, 12, 13, 14, 15); @@ -4394,7 +4399,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepi64_epi32() { + const unsafe fn test_mm512_mask_cvtepi64_epi32() { let a = _mm512_set_epi64(8, 9, 10, 11, 12, 13, 14, 15); let src = _mm256_set1_epi32(-1); let r = _mm512_mask_cvtepi64_epi32(src, 0, a); @@ -4405,7 +4410,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_cvtepi64_epi32() { + const unsafe fn test_mm512_maskz_cvtepi64_epi32() { let a = _mm512_set_epi64(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_maskz_cvtepi64_epi32(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -4415,7 +4420,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cvtepi64_epi32() { + const unsafe fn test_mm256_cvtepi64_epi32() { let a = _mm256_set_epi64x(1, 2, 3, 4); let r = _mm256_cvtepi64_epi32(a); let e = _mm_set_epi32(1, 2, 3, 4); @@ -4423,7 +4428,7 @@ mod tests 
{ } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cvtepi64_epi32() { + const unsafe fn test_mm256_mask_cvtepi64_epi32() { let a = _mm256_set_epi64x(1, 2, 3, 4); let src = _mm_set1_epi32(0); let r = _mm256_mask_cvtepi64_epi32(src, 0, a); @@ -4434,7 +4439,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_cvtepi64_epi32() { + const unsafe fn test_mm256_maskz_cvtepi64_epi32() { let a = _mm256_set_epi64x(1, 2, 3, 4); let r = _mm256_maskz_cvtepi64_epi32(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -4473,7 +4478,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cvtepi64_epi16() { + const unsafe fn test_mm512_cvtepi64_epi16() { let a = _mm512_set_epi64(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_cvtepi64_epi16(a); let e = _mm_set_epi16(8, 9, 10, 11, 12, 13, 14, 15); @@ -4481,7 +4486,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cvtepi64_epi16() { + const unsafe fn test_mm512_mask_cvtepi64_epi16() { let a = _mm512_set_epi64(8, 9, 10, 11, 12, 13, 14, 15); let src = _mm_set1_epi16(-1); let r = _mm512_mask_cvtepi64_epi16(src, 0, a); @@ -4492,7 +4497,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_cvtepi64_epi16() { + const unsafe fn test_mm512_maskz_cvtepi64_epi16() { let a = _mm512_set_epi64(8, 9, 10, 11, 12, 13, 14, 15); let r = _mm512_maskz_cvtepi64_epi16(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -6449,42 +6454,42 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_setzero_pd() { + const unsafe fn test_mm512_setzero_pd() { assert_eq_m512d(_mm512_setzero_pd(), _mm512_set1_pd(0.)); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_set1_epi64() { + const unsafe fn test_mm512_set1_epi64() { let r = _mm512_set_epi64(2, 2, 2, 2, 2, 2, 2, 2); assert_eq_m512i(r, _mm512_set1_epi64(2)); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_set1_pd() { + const unsafe fn test_mm512_set1_pd() { let expected = _mm512_set_pd(2., 2., 2., 2., 2., 2., 2., 2.); assert_eq_m512d(expected, _mm512_set1_pd(2.)); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_set4_epi64() { + const unsafe fn test_mm512_set4_epi64() { let r = _mm512_set_epi64(4, 3, 2, 1, 4, 3, 2, 1); assert_eq_m512i(r, _mm512_set4_epi64(4, 3, 2, 1)); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_set4_pd() { + const unsafe fn test_mm512_set4_pd() { let r = _mm512_set_pd(4., 3., 2., 1., 4., 3., 2., 1.); assert_eq_m512d(r, _mm512_set4_pd(4., 3., 2., 1.)); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_setr4_epi64() { + const unsafe fn test_mm512_setr4_epi64() { let r = _mm512_set_epi64(4, 3, 2, 1, 4, 3, 2, 1); assert_eq_m512i(r, _mm512_setr4_epi64(1, 2, 3, 4)); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_setr4_pd() { + const unsafe fn test_mm512_setr4_pd() { let r = _mm512_set_pd(4., 3., 2., 1., 4., 3., 2., 1.); assert_eq_m512d(r, _mm512_setr4_pd(1., 2., 3., 4.)); } @@ -6709,7 +6714,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmplt_epu64_mask() { + const unsafe fn test_mm512_cmplt_epu64_mask() { let a = _mm512_set_epi64(0, 1, -1, u64::MAX as i64, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); let m = _mm512_cmplt_epu64_mask(a, b); @@ -6717,7 +6722,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmplt_epu64_mask() { + const unsafe fn test_mm512_mask_cmplt_epu64_mask() { let a = _mm512_set_epi64(0, 1, -1, u64::MAX as i64, i64::MAX, 
i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); let mask = 0b01111010; @@ -6726,7 +6731,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmplt_epu64_mask() { + const unsafe fn test_mm256_cmplt_epu64_mask() { let a = _mm256_set_epi64x(0, 1, 2, 100); let b = _mm256_set1_epi64x(2); let r = _mm256_cmplt_epu64_mask(a, b); @@ -6734,7 +6739,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmplt_epu64_mask() { + const unsafe fn test_mm256_mask_cmplt_epu64_mask() { let a = _mm256_set_epi64x(0, 1, 2, 100); let b = _mm256_set1_epi64x(2); let mask = 0b11111111; @@ -6743,7 +6748,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmplt_epu64_mask() { + const unsafe fn test_mm_cmplt_epu64_mask() { let a = _mm_set_epi64x(0, 1); let b = _mm_set1_epi64x(2); let r = _mm_cmplt_epu64_mask(a, b); @@ -6751,7 +6756,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmplt_epu64_mask() { + const unsafe fn test_mm_mask_cmplt_epu64_mask() { let a = _mm_set_epi64x(0, 1); let b = _mm_set1_epi64x(2); let mask = 0b11111111; @@ -6760,7 +6765,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmpgt_epu64_mask() { + const unsafe fn test_mm512_cmpgt_epu64_mask() { let a = _mm512_set_epi64(0, 1, -1, u64::MAX as i64, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); let m = _mm512_cmpgt_epu64_mask(b, a); @@ -6768,7 +6773,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmpgt_epu64_mask() { + const unsafe fn test_mm512_mask_cmpgt_epu64_mask() { let a = _mm512_set_epi64(0, 1, -1, u64::MAX as i64, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); let mask = 0b01111010; @@ -6777,7 +6782,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmpgt_epu64_mask() { + const unsafe fn test_mm256_cmpgt_epu64_mask() { let a = _mm256_set_epi64x(0, 1, 2, 3); let b = _mm256_set1_epi64x(1); let r = _mm256_cmpgt_epu64_mask(a, b); @@ -6785,7 +6790,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmpgt_epu64_mask() { + const unsafe fn test_mm256_mask_cmpgt_epu64_mask() { let a = _mm256_set_epi64x(0, 1, 2, 3); let b = _mm256_set1_epi64x(1); let mask = 0b11111111; @@ -6794,7 +6799,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmpgt_epu64_mask() { + const unsafe fn test_mm_cmpgt_epu64_mask() { let a = _mm_set_epi64x(1, 2); let b = _mm_set1_epi64x(1); let r = _mm_cmpgt_epu64_mask(a, b); @@ -6802,7 +6807,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmpgt_epu64_mask() { + const unsafe fn test_mm_mask_cmpgt_epu64_mask() { let a = _mm_set_epi64x(1, 2); let b = _mm_set1_epi64x(1); let mask = 0b11111111; @@ -6811,7 +6816,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmple_epu64_mask() { + const unsafe fn test_mm512_cmple_epu64_mask() { let a = _mm512_set_epi64(0, 1, -1, u64::MAX as i64, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); assert_eq!( @@ -6821,7 +6826,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmple_epu64_mask() { + const unsafe fn test_mm512_mask_cmple_epu64_mask() { let a = _mm512_set_epi64(0, 1, -1, u64::MAX as i64, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); let mask = 0b01111010; @@ -6829,7 +6834,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn 
test_mm256_cmple_epu64_mask() { + const unsafe fn test_mm256_cmple_epu64_mask() { let a = _mm256_set_epi64x(0, 1, 2, 1); let b = _mm256_set1_epi64x(1); let r = _mm256_cmple_epu64_mask(a, b); @@ -6837,7 +6842,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmple_epu64_mask() { + const unsafe fn test_mm256_mask_cmple_epu64_mask() { let a = _mm256_set_epi64x(0, 1, 2, 1); let b = _mm256_set1_epi64x(1); let mask = 0b11111111; @@ -6846,7 +6851,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmple_epu64_mask() { + const unsafe fn test_mm_cmple_epu64_mask() { let a = _mm_set_epi64x(0, 1); let b = _mm_set1_epi64x(1); let r = _mm_cmple_epu64_mask(a, b); @@ -6854,7 +6859,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmple_epu64_mask() { + const unsafe fn test_mm_mask_cmple_epu64_mask() { let a = _mm_set_epi64x(0, 1); let b = _mm_set1_epi64x(1); let mask = 0b11111111; @@ -6863,7 +6868,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmpge_epu64_mask() { + const unsafe fn test_mm512_cmpge_epu64_mask() { let a = _mm512_set_epi64(0, 1, -1, u64::MAX as i64, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); assert_eq!( @@ -6873,7 +6878,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmpge_epu64_mask() { + const unsafe fn test_mm512_mask_cmpge_epu64_mask() { let a = _mm512_set_epi64(0, 1, -1, u64::MAX as i64, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); let mask = 0b11111111; @@ -6882,7 +6887,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmpge_epu64_mask() { + const unsafe fn test_mm256_cmpge_epu64_mask() { let a = _mm256_set_epi64x(0, 1, 2, u64::MAX as i64); let b = _mm256_set1_epi64x(1); let r = _mm256_cmpge_epu64_mask(a, b); @@ -6890,7 +6895,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmpge_epu64_mask() { + const unsafe fn test_mm256_mask_cmpge_epu64_mask() { let a = _mm256_set_epi64x(0, 1, 2, u64::MAX as i64); let b = _mm256_set1_epi64x(1); let mask = 0b11111111; @@ -6899,7 +6904,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmpge_epu64_mask() { + const unsafe fn test_mm_cmpge_epu64_mask() { let a = _mm_set_epi64x(0, 1); let b = _mm_set1_epi64x(1); let r = _mm_cmpge_epu64_mask(a, b); @@ -6907,7 +6912,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmpge_epu64_mask() { + const unsafe fn test_mm_mask_cmpge_epu64_mask() { let a = _mm_set_epi64x(0, 1); let b = _mm_set1_epi64x(1); let mask = 0b11111111; @@ -6916,7 +6921,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmpeq_epu64_mask() { + const unsafe fn test_mm512_cmpeq_epu64_mask() { let a = _mm512_set_epi64(0, 1, -1, u64::MAX as i64, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set_epi64(0, 1, 13, 42, i64::MAX, i64::MIN, 100, -100); let m = _mm512_cmpeq_epu64_mask(b, a); @@ -6924,7 +6929,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmpeq_epu64_mask() { + const unsafe fn test_mm512_mask_cmpeq_epu64_mask() { let a = _mm512_set_epi64(0, 1, -1, u64::MAX as i64, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set_epi64(0, 1, 13, 42, i64::MAX, i64::MIN, 100, -100); let mask = 0b01111010; @@ -6933,7 +6938,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmpeq_epu64_mask() { + const unsafe fn 
test_mm256_cmpeq_epu64_mask() { let a = _mm256_set_epi64x(0, 1, -1, u64::MAX as i64); let b = _mm256_set_epi64x(0, 1, 13, 42); let m = _mm256_cmpeq_epu64_mask(b, a); @@ -6941,7 +6946,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmpeq_epu64_mask() { + const unsafe fn test_mm256_mask_cmpeq_epu64_mask() { let a = _mm256_set_epi64x(0, 1, -1, u64::MAX as i64); let b = _mm256_set_epi64x(0, 1, 13, 42); let mask = 0b11111111; @@ -6950,7 +6955,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmpeq_epu64_mask() { + const unsafe fn test_mm_cmpeq_epu64_mask() { let a = _mm_set_epi64x(0, 1); let b = _mm_set_epi64x(0, 1); let m = _mm_cmpeq_epu64_mask(b, a); @@ -6958,7 +6963,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmpeq_epu64_mask() { + const unsafe fn test_mm_mask_cmpeq_epu64_mask() { let a = _mm_set_epi64x(0, 1); let b = _mm_set_epi64x(0, 1); let mask = 0b11111111; @@ -6967,7 +6972,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmpneq_epu64_mask() { + const unsafe fn test_mm512_cmpneq_epu64_mask() { let a = _mm512_set_epi64(0, 1, -1, u64::MAX as i64, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set_epi64(0, 1, 13, 42, i64::MAX, i64::MIN, 100, -100); let m = _mm512_cmpneq_epu64_mask(b, a); @@ -6975,7 +6980,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmpneq_epu64_mask() { + const unsafe fn test_mm512_mask_cmpneq_epu64_mask() { let a = _mm512_set_epi64(0, 1, -1, u64::MAX as i64, i64::MAX, i64::MIN, -100, 100); let b = _mm512_set_epi64(0, 1, 13, 42, i64::MAX, i64::MIN, 100, -100); let mask = 0b01111010; @@ -6984,7 +6989,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmpneq_epu64_mask() { + const unsafe fn test_mm256_cmpneq_epu64_mask() { let a = _mm256_set_epi64x(0, 1, -1, u64::MAX as i64); let b = _mm256_set_epi64x(0, 1, 13, 42); let r = _mm256_cmpneq_epu64_mask(b, a); @@ -6992,7 +6997,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmpneq_epu64_mask() { + const unsafe fn test_mm256_mask_cmpneq_epu64_mask() { let a = _mm256_set_epi64x(0, 1, -1, u64::MAX as i64); let b = _mm256_set_epi64x(0, 1, 13, 42); let mask = 0b11111111; @@ -7001,7 +7006,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmpneq_epu64_mask() { + const unsafe fn test_mm_cmpneq_epu64_mask() { let a = _mm_set_epi64x(-1, u64::MAX as i64); let b = _mm_set_epi64x(13, 42); let r = _mm_cmpneq_epu64_mask(b, a); @@ -7009,7 +7014,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmpneq_epu64_mask() { + const unsafe fn test_mm_mask_cmpneq_epu64_mask() { let a = _mm_set_epi64x(-1, u64::MAX as i64); let b = _mm_set_epi64x(13, 42); let mask = 0b11111111; @@ -7018,7 +7023,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmp_epu64_mask() { + const unsafe fn test_mm512_cmp_epu64_mask() { let a = _mm512_set_epi64(0, 1, -1, u64::MAX as i64, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); let m = _mm512_cmp_epu64_mask::<_MM_CMPINT_LT>(a, b); @@ -7026,7 +7031,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmp_epu64_mask() { + const unsafe fn test_mm512_mask_cmp_epu64_mask() { let a = _mm512_set_epi64(0, 1, -1, u64::MAX as i64, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); let mask = 0b01111010; @@ -7035,7 +7040,7 @@ mod tests { } 
#[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmp_epu64_mask() { + const unsafe fn test_mm256_cmp_epu64_mask() { let a = _mm256_set_epi64x(0, 1, -1, 100); let b = _mm256_set1_epi64x(1); let m = _mm256_cmp_epu64_mask::<_MM_CMPINT_LT>(a, b); @@ -7043,7 +7048,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmp_epu64_mask() { + const unsafe fn test_mm256_mask_cmp_epu64_mask() { let a = _mm256_set_epi64x(0, 1, -1, 100); let b = _mm256_set1_epi64x(1); let mask = 0b11111111; @@ -7052,7 +7057,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmp_epu64_mask() { + const unsafe fn test_mm_cmp_epu64_mask() { let a = _mm_set_epi64x(0, 1); let b = _mm_set1_epi64x(1); let m = _mm_cmp_epu64_mask::<_MM_CMPINT_LT>(a, b); @@ -7060,7 +7065,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmp_epu64_mask() { + const unsafe fn test_mm_mask_cmp_epu64_mask() { let a = _mm_set_epi64x(0, 1); let b = _mm_set1_epi64x(1); let mask = 0b11111111; @@ -7069,7 +7074,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmplt_epi64_mask() { + const unsafe fn test_mm512_cmplt_epi64_mask() { let a = _mm512_set_epi64(0, 1, -1, 13, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); let m = _mm512_cmplt_epi64_mask(a, b); @@ -7077,7 +7082,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmplt_epi64_mask() { + const unsafe fn test_mm512_mask_cmplt_epi64_mask() { let a = _mm512_set_epi64(0, 1, -1, 13, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); let mask = 0b01100110; @@ -7086,7 +7091,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmplt_epi64_mask() { + const unsafe fn test_mm256_cmplt_epi64_mask() { let a = _mm256_set_epi64x(0, 1, -1, -13); let b = _mm256_set1_epi64x(-1); let r = _mm256_cmplt_epi64_mask(a, b); @@ -7094,7 +7099,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmplt_epi64_mask() { + const unsafe fn test_mm256_mask_cmplt_epi64_mask() { let a = _mm256_set_epi64x(0, 1, -1, -13); let b = _mm256_set1_epi64x(-1); let mask = 0b11111111; @@ -7103,7 +7108,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmplt_epi64_mask() { + const unsafe fn test_mm_cmplt_epi64_mask() { let a = _mm_set_epi64x(-1, -13); let b = _mm_set1_epi64x(-1); let r = _mm_cmplt_epi64_mask(a, b); @@ -7111,7 +7116,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmplt_epi64_mask() { + const unsafe fn test_mm_mask_cmplt_epi64_mask() { let a = _mm_set_epi64x(-1, -13); let b = _mm_set1_epi64x(-1); let mask = 0b11111111; @@ -7120,7 +7125,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmpgt_epi64_mask() { + const unsafe fn test_mm512_cmpgt_epi64_mask() { let a = _mm512_set_epi64(0, 1, -1, 13, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); let m = _mm512_cmpgt_epi64_mask(b, a); @@ -7128,7 +7133,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmpgt_epi64_mask() { + const unsafe fn test_mm512_mask_cmpgt_epi64_mask() { let a = _mm512_set_epi64(0, 1, -1, 13, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); let mask = 0b01100110; @@ -7137,7 +7142,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmpgt_epi64_mask() { + const unsafe fn test_mm256_cmpgt_epi64_mask() { let a = _mm256_set_epi64x(0, 1, 
-1, 13); let b = _mm256_set1_epi64x(-1); let r = _mm256_cmpgt_epi64_mask(a, b); @@ -7145,7 +7150,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmpgt_epi64_mask() { + const unsafe fn test_mm256_mask_cmpgt_epi64_mask() { let a = _mm256_set_epi64x(0, 1, -1, 13); let b = _mm256_set1_epi64x(-1); let mask = 0b11111111; @@ -7154,7 +7159,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmpgt_epi64_mask() { + const unsafe fn test_mm_cmpgt_epi64_mask() { let a = _mm_set_epi64x(0, -1); let b = _mm_set1_epi64x(-1); let r = _mm_cmpgt_epi64_mask(a, b); @@ -7162,7 +7167,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmpgt_epi64_mask() { + const unsafe fn test_mm_mask_cmpgt_epi64_mask() { let a = _mm_set_epi64x(0, -1); let b = _mm_set1_epi64x(-1); let mask = 0b11111111; @@ -7171,7 +7176,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmple_epi64_mask() { + const unsafe fn test_mm512_cmple_epi64_mask() { let a = _mm512_set_epi64(0, 1, -1, u64::MAX as i64, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); assert_eq!( @@ -7181,7 +7186,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmple_epi64_mask() { + const unsafe fn test_mm512_mask_cmple_epi64_mask() { let a = _mm512_set_epi64(0, 1, -1, u64::MAX as i64, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); let mask = 0b01111010; @@ -7189,7 +7194,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmple_epi64_mask() { + const unsafe fn test_mm256_cmple_epi64_mask() { let a = _mm256_set_epi64x(0, 1, -1, i64::MAX); let b = _mm256_set1_epi64x(-1); let r = _mm256_cmple_epi64_mask(a, b); @@ -7197,7 +7202,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmple_epi64_mask() { + const unsafe fn test_mm256_mask_cmple_epi64_mask() { let a = _mm256_set_epi64x(0, 1, -1, i64::MAX); let b = _mm256_set1_epi64x(-1); let mask = 0b11111111; @@ -7206,7 +7211,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmple_epi64_mask() { + const unsafe fn test_mm_cmple_epi64_mask() { let a = _mm_set_epi64x(0, 1); let b = _mm_set1_epi64x(1); let r = _mm_cmple_epi64_mask(a, b); @@ -7214,7 +7219,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmple_epi64_mask() { + const unsafe fn test_mm_mask_cmple_epi64_mask() { let a = _mm_set_epi64x(0, 1); let b = _mm_set1_epi64x(1); let mask = 0b11111111; @@ -7223,7 +7228,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmpge_epi64_mask() { + const unsafe fn test_mm512_cmpge_epi64_mask() { let a = _mm512_set_epi64(0, 1, -1, u64::MAX as i64, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); assert_eq!( @@ -7233,7 +7238,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmpge_epi64_mask() { + const unsafe fn test_mm512_mask_cmpge_epi64_mask() { let a = _mm512_set_epi64(0, 1, -1, u64::MAX as i64, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); let mask = 0b11111111; @@ -7242,7 +7247,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmpge_epi64_mask() { + const unsafe fn test_mm256_cmpge_epi64_mask() { let a = _mm256_set_epi64x(0, 1, -1, i64::MAX); let b = _mm256_set1_epi64x(-1); let r = _mm256_cmpge_epi64_mask(a, b); @@ -7250,7 +7255,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn 
test_mm256_mask_cmpge_epi64_mask() { + const unsafe fn test_mm256_mask_cmpge_epi64_mask() { let a = _mm256_set_epi64x(0, 1, -1, i64::MAX); let b = _mm256_set1_epi64x(-1); let mask = 0b11111111; @@ -7259,7 +7264,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmpge_epi64_mask() { + const unsafe fn test_mm_cmpge_epi64_mask() { let a = _mm_set_epi64x(0, 1); let b = _mm_set1_epi64x(-1); let r = _mm_cmpge_epi64_mask(a, b); @@ -7267,7 +7272,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmpge_epi64_mask() { + const unsafe fn test_mm_mask_cmpge_epi64_mask() { let a = _mm_set_epi64x(0, 1); let b = _mm_set1_epi64x(-1); let mask = 0b11111111; @@ -7276,7 +7281,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmpeq_epi64_mask() { + const unsafe fn test_mm512_cmpeq_epi64_mask() { let a = _mm512_set_epi64(0, 1, -1, 13, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set_epi64(0, 1, 13, 42, i64::MAX, i64::MIN, 100, -100); let m = _mm512_cmpeq_epi64_mask(b, a); @@ -7284,7 +7289,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmpeq_epi64_mask() { + const unsafe fn test_mm512_mask_cmpeq_epi64_mask() { let a = _mm512_set_epi64(0, 1, -1, 13, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set_epi64(0, 1, 13, 42, i64::MAX, i64::MIN, 100, -100); let mask = 0b01111010; @@ -7293,7 +7298,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmpeq_epi64_mask() { + const unsafe fn test_mm256_cmpeq_epi64_mask() { let a = _mm256_set_epi64x(0, 1, -1, 13); let b = _mm256_set_epi64x(0, 1, 13, 42); let m = _mm256_cmpeq_epi64_mask(b, a); @@ -7301,7 +7306,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmpeq_epi64_mask() { + const unsafe fn test_mm256_mask_cmpeq_epi64_mask() { let a = _mm256_set_epi64x(0, 1, -1, 13); let b = _mm256_set_epi64x(0, 1, 13, 42); let mask = 0b11111111; @@ -7310,7 +7315,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmpeq_epi64_mask() { + const unsafe fn test_mm_cmpeq_epi64_mask() { let a = _mm_set_epi64x(0, 1); let b = _mm_set_epi64x(0, 1); let m = _mm_cmpeq_epi64_mask(b, a); @@ -7318,7 +7323,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmpeq_epi64_mask() { + const unsafe fn test_mm_mask_cmpeq_epi64_mask() { let a = _mm_set_epi64x(0, 1); let b = _mm_set_epi64x(0, 1); let mask = 0b11111111; @@ -7327,19 +7332,19 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_set_epi64() { + const unsafe fn test_mm512_set_epi64() { let r = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7); assert_eq_m512i(r, _mm512_set_epi64(7, 6, 5, 4, 3, 2, 1, 0)) } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_setr_epi64() { + const unsafe fn test_mm512_setr_epi64() { let r = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7); assert_eq_m512i(r, _mm512_setr_epi64(7, 6, 5, 4, 3, 2, 1, 0)) } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmpneq_epi64_mask() { + const unsafe fn test_mm512_cmpneq_epi64_mask() { let a = _mm512_set_epi64(0, 1, -1, 13, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set_epi64(0, 1, 13, 42, i64::MAX, i64::MIN, 100, -100); let m = _mm512_cmpneq_epi64_mask(b, a); @@ -7347,7 +7352,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmpneq_epi64_mask() { + const unsafe fn test_mm512_mask_cmpneq_epi64_mask() { let a = _mm512_set_epi64(0, 1, -1, 13, i64::MAX, i64::MIN, -100, 100); let b = 
_mm512_set_epi64(0, 1, 13, 42, i64::MAX, i64::MIN, 100, -100); let mask = 0b01111010; @@ -7356,7 +7361,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmpneq_epi64_mask() { + const unsafe fn test_mm256_cmpneq_epi64_mask() { let a = _mm256_set_epi64x(0, 1, -1, 13); let b = _mm256_set_epi64x(0, 1, 13, 42); let r = _mm256_cmpneq_epi64_mask(b, a); @@ -7364,7 +7369,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmpneq_epi64_mask() { + const unsafe fn test_mm256_mask_cmpneq_epi64_mask() { let a = _mm256_set_epi64x(0, 1, -1, 13); let b = _mm256_set_epi64x(0, 1, 13, 42); let mask = 0b11111111; @@ -7373,7 +7378,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmpneq_epi64_mask() { + const unsafe fn test_mm_cmpneq_epi64_mask() { let a = _mm_set_epi64x(-1, 13); let b = _mm_set_epi64x(13, 42); let r = _mm_cmpneq_epi64_mask(b, a); @@ -7381,7 +7386,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmpneq_epi64_mask() { + const unsafe fn test_mm_mask_cmpneq_epi64_mask() { let a = _mm_set_epi64x(-1, 13); let b = _mm_set_epi64x(13, 42); let mask = 0b11111111; @@ -7390,7 +7395,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_cmp_epi64_mask() { + const unsafe fn test_mm512_cmp_epi64_mask() { let a = _mm512_set_epi64(0, 1, -1, 13, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); let m = _mm512_cmp_epi64_mask::<_MM_CMPINT_LT>(a, b); @@ -7398,7 +7403,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_cmp_epi64_mask() { + const unsafe fn test_mm512_mask_cmp_epi64_mask() { let a = _mm512_set_epi64(0, 1, -1, 13, i64::MAX, i64::MIN, 100, -100); let b = _mm512_set1_epi64(-1); let mask = 0b01100110; @@ -7407,7 +7412,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_cmp_epi64_mask() { + const unsafe fn test_mm256_cmp_epi64_mask() { let a = _mm256_set_epi64x(0, 1, -1, 13); let b = _mm256_set1_epi64x(1); let m = _mm256_cmp_epi64_mask::<_MM_CMPINT_LT>(a, b); @@ -7415,7 +7420,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_cmp_epi64_mask() { + const unsafe fn test_mm256_mask_cmp_epi64_mask() { let a = _mm256_set_epi64x(0, 1, -1, 13); let b = _mm256_set1_epi64x(1); let mask = 0b11111111; @@ -7424,7 +7429,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_cmp_epi64_mask() { + const unsafe fn test_mm_cmp_epi64_mask() { let a = _mm_set_epi64x(0, 1); let b = _mm_set1_epi64x(1); let m = _mm_cmp_epi64_mask::<_MM_CMPINT_LT>(a, b); @@ -7432,7 +7437,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_cmp_epi64_mask() { + const unsafe fn test_mm_mask_cmp_epi64_mask() { let a = _mm_set_epi64x(0, 1); let b = _mm_set1_epi64x(1); let mask = 0b11111111; @@ -8313,7 +8318,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_rol_epi64() { + const unsafe fn test_mm512_rol_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 63, 1 << 32, 1 << 32, 1 << 32, @@ -8329,7 +8334,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_rol_epi64() { + const unsafe fn test_mm512_mask_rol_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 63, 1 << 32, 1 << 32, 1 << 32, @@ -8347,7 +8352,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_rol_epi64() { + const unsafe fn test_mm512_maskz_rol_epi64() { #[rustfmt::skip] let a 
= _mm512_set_epi64( 1 << 32, 1 << 32, 1 << 32, 1 << 32, @@ -8361,7 +8366,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_rol_epi64() { + const unsafe fn test_mm256_rol_epi64() { let a = _mm256_set_epi64x(1 << 63, 1 << 32, 1 << 32, 1 << 32); let r = _mm256_rol_epi64::<1>(a); let e = _mm256_set_epi64x(1 << 0, 1 << 33, 1 << 33, 1 << 33); @@ -8369,7 +8374,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_rol_epi64() { + const unsafe fn test_mm256_mask_rol_epi64() { let a = _mm256_set_epi64x(1 << 63, 1 << 32, 1 << 32, 1 << 32); let r = _mm256_mask_rol_epi64::<1>(a, 0, a); assert_eq_m256i(r, a); @@ -8379,7 +8384,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_rol_epi64() { + const unsafe fn test_mm256_maskz_rol_epi64() { let a = _mm256_set_epi64x(1 << 63, 1 << 32, 1 << 32, 1 << 32); let r = _mm256_maskz_rol_epi64::<1>(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -8389,7 +8394,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_rol_epi64() { + const unsafe fn test_mm_rol_epi64() { let a = _mm_set_epi64x(1 << 63, 1 << 32); let r = _mm_rol_epi64::<1>(a); let e = _mm_set_epi64x(1 << 0, 1 << 33); @@ -8397,7 +8402,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_rol_epi64() { + const unsafe fn test_mm_mask_rol_epi64() { let a = _mm_set_epi64x(1 << 63, 1 << 32); let r = _mm_mask_rol_epi64::<1>(a, 0, a); assert_eq_m128i(r, a); @@ -8407,7 +8412,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_rol_epi64() { + const unsafe fn test_mm_maskz_rol_epi64() { let a = _mm_set_epi64x(1 << 63, 1 << 32); let r = _mm_maskz_rol_epi64::<1>(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -8417,7 +8422,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_ror_epi64() { + const unsafe fn test_mm512_ror_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 0, 1 << 32, 1 << 32, 1 << 32, @@ -8433,7 +8438,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_ror_epi64() { + const unsafe fn test_mm512_mask_ror_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 0, 1 << 32, 1 << 32, 1 << 32, @@ -8451,7 +8456,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_ror_epi64() { + const unsafe fn test_mm512_maskz_ror_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 32, 1 << 32, 1 << 32, 1 << 32, @@ -8465,7 +8470,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_ror_epi64() { + const unsafe fn test_mm256_ror_epi64() { let a = _mm256_set_epi64x(1 << 0, 1 << 32, 1 << 32, 1 << 32); let r = _mm256_ror_epi64::<1>(a); let e = _mm256_set_epi64x(1 << 63, 1 << 31, 1 << 31, 1 << 31); @@ -8473,7 +8478,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_ror_epi64() { + const unsafe fn test_mm256_mask_ror_epi64() { let a = _mm256_set_epi64x(1 << 0, 1 << 32, 1 << 32, 1 << 32); let r = _mm256_mask_ror_epi64::<1>(a, 0, a); assert_eq_m256i(r, a); @@ -8483,7 +8488,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_ror_epi64() { + const unsafe fn test_mm256_maskz_ror_epi64() { let a = _mm256_set_epi64x(1 << 0, 1 << 32, 1 << 32, 1 << 32); let r = _mm256_maskz_ror_epi64::<1>(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -8493,7 +8498,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn 
test_mm_ror_epi64() { + const unsafe fn test_mm_ror_epi64() { let a = _mm_set_epi64x(1 << 0, 1 << 32); let r = _mm_ror_epi64::<1>(a); let e = _mm_set_epi64x(1 << 63, 1 << 31); @@ -8501,7 +8506,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_ror_epi64() { + const unsafe fn test_mm_mask_ror_epi64() { let a = _mm_set_epi64x(1 << 0, 1 << 32); let r = _mm_mask_ror_epi64::<1>(a, 0, a); assert_eq_m128i(r, a); @@ -8511,7 +8516,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_ror_epi64() { + const unsafe fn test_mm_maskz_ror_epi64() { let a = _mm_set_epi64x(1 << 0, 1 << 32); let r = _mm_maskz_ror_epi64::<1>(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -8521,7 +8526,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_slli_epi64() { + const unsafe fn test_mm512_slli_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 63, 1 << 32, 1 << 32, 1 << 32, @@ -8537,7 +8542,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_slli_epi64() { + const unsafe fn test_mm512_mask_slli_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 63, 1 << 32, 1 << 32, 1 << 32, @@ -8555,7 +8560,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_slli_epi64() { + const unsafe fn test_mm512_maskz_slli_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 32, 1 << 32, 1 << 32, 1 << 32, @@ -8569,7 +8574,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_slli_epi64() { + const unsafe fn test_mm256_mask_slli_epi64() { let a = _mm256_set_epi64x(1 << 63, 1 << 32, 1 << 32, 1 << 32); let r = _mm256_mask_slli_epi64::<1>(a, 0, a); assert_eq_m256i(r, a); @@ -8579,7 +8584,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_slli_epi64() { + const unsafe fn test_mm256_maskz_slli_epi64() { let a = _mm256_set_epi64x(1 << 63, 1 << 32, 1 << 32, 1 << 32); let r = _mm256_maskz_slli_epi64::<1>(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -8589,7 +8594,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_slli_epi64() { + const unsafe fn test_mm_mask_slli_epi64() { let a = _mm_set_epi64x(1 << 63, 1 << 32); let r = _mm_mask_slli_epi64::<1>(a, 0, a); assert_eq_m128i(r, a); @@ -8599,7 +8604,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_slli_epi64() { + const unsafe fn test_mm_maskz_slli_epi64() { let a = _mm_set_epi64x(1 << 63, 1 << 32); let r = _mm_maskz_slli_epi64::<1>(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -8609,7 +8614,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_srli_epi64() { + const unsafe fn test_mm512_srli_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 0, 1 << 32, 1 << 32, 1 << 32, @@ -8625,7 +8630,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_srli_epi64() { + const unsafe fn test_mm512_mask_srli_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 0, 1 << 32, 1 << 32, 1 << 32, @@ -8643,7 +8648,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_srli_epi64() { + const unsafe fn test_mm512_maskz_srli_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 32, 1 << 32, 1 << 32, 1 << 32, @@ -8657,7 +8662,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_srli_epi64() { + const unsafe fn test_mm256_mask_srli_epi64() { let a = _mm256_set_epi64x(1 << 5, 0, 0, 0); 
let r = _mm256_mask_srli_epi64::<1>(a, 0, a); assert_eq_m256i(r, a); @@ -8667,7 +8672,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_srli_epi64() { + const unsafe fn test_mm256_maskz_srli_epi64() { let a = _mm256_set_epi64x(1 << 5, 0, 0, 0); let r = _mm256_maskz_srli_epi64::<1>(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -8677,7 +8682,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_srli_epi64() { + const unsafe fn test_mm_mask_srli_epi64() { let a = _mm_set_epi64x(1 << 5, 0); let r = _mm_mask_srli_epi64::<1>(a, 0, a); assert_eq_m128i(r, a); @@ -8687,7 +8692,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_srli_epi64() { + const unsafe fn test_mm_maskz_srli_epi64() { let a = _mm_set_epi64x(1 << 5, 0); let r = _mm_maskz_srli_epi64::<1>(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -8697,7 +8702,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_rolv_epi64() { + const unsafe fn test_mm512_rolv_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 32, 1 << 63, 1 << 32, 1 << 32, @@ -8714,7 +8719,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_rolv_epi64() { + const unsafe fn test_mm512_mask_rolv_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 32, 1 << 63, 1 << 32, 1 << 32, @@ -8733,7 +8738,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_rolv_epi64() { + const unsafe fn test_mm512_maskz_rolv_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 32, 1 << 32, 1 << 32, 1 << 32, @@ -8748,7 +8753,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_rolv_epi64() { + const unsafe fn test_mm256_rolv_epi64() { let a = _mm256_set_epi64x(1 << 32, 1 << 63, 1 << 32, 1 << 32); let b = _mm256_set_epi64x(0, 1, 2, 3); let r = _mm256_rolv_epi64(a, b); @@ -8757,7 +8762,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_rolv_epi64() { + const unsafe fn test_mm256_mask_rolv_epi64() { let a = _mm256_set_epi64x(1 << 32, 1 << 63, 1 << 32, 1 << 32); let b = _mm256_set_epi64x(0, 1, 2, 3); let r = _mm256_mask_rolv_epi64(a, 0, a, b); @@ -8768,7 +8773,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_rolv_epi64() { + const unsafe fn test_mm256_maskz_rolv_epi64() { let a = _mm256_set_epi64x(1 << 32, 1 << 63, 1 << 32, 1 << 32); let b = _mm256_set_epi64x(0, 1, 2, 3); let r = _mm256_maskz_rolv_epi64(0, a, b); @@ -8779,7 +8784,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_rolv_epi64() { + const unsafe fn test_mm_rolv_epi64() { let a = _mm_set_epi64x(1 << 32, 1 << 63); let b = _mm_set_epi64x(0, 1); let r = _mm_rolv_epi64(a, b); @@ -8788,7 +8793,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_rolv_epi64() { + const unsafe fn test_mm_mask_rolv_epi64() { let a = _mm_set_epi64x(1 << 32, 1 << 63); let b = _mm_set_epi64x(0, 1); let r = _mm_mask_rolv_epi64(a, 0, a, b); @@ -8799,7 +8804,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_rolv_epi64() { + const unsafe fn test_mm_maskz_rolv_epi64() { let a = _mm_set_epi64x(1 << 32, 1 << 63); let b = _mm_set_epi64x(0, 1); let r = _mm_maskz_rolv_epi64(0, a, b); @@ -8810,7 +8815,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_rorv_epi64() { + const unsafe fn test_mm512_rorv_epi64() { #[rustfmt::skip] let a = 
_mm512_set_epi64( 1 << 32, 1 << 0, 1 << 32, 1 << 32, @@ -8827,7 +8832,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_rorv_epi64() { + const unsafe fn test_mm512_mask_rorv_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 32, 1 << 0, 1 << 32, 1 << 32, @@ -8846,7 +8851,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_rorv_epi64() { + const unsafe fn test_mm512_maskz_rorv_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 32, 1 << 32, 1 << 32, 1 << 32, @@ -8861,7 +8866,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_rorv_epi64() { + const unsafe fn test_mm256_rorv_epi64() { let a = _mm256_set_epi64x(1 << 32, 1 << 0, 1 << 32, 1 << 32); let b = _mm256_set_epi64x(0, 1, 2, 3); let r = _mm256_rorv_epi64(a, b); @@ -8870,7 +8875,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_rorv_epi64() { + const unsafe fn test_mm256_mask_rorv_epi64() { let a = _mm256_set_epi64x(1 << 32, 1 << 0, 1 << 32, 1 << 32); let b = _mm256_set_epi64x(0, 1, 2, 3); let r = _mm256_mask_rorv_epi64(a, 0, a, b); @@ -8881,7 +8886,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_rorv_epi64() { + const unsafe fn test_mm256_maskz_rorv_epi64() { let a = _mm256_set_epi64x(1 << 32, 1 << 0, 1 << 32, 1 << 32); let b = _mm256_set_epi64x(0, 1, 2, 3); let r = _mm256_maskz_rorv_epi64(0, a, b); @@ -8892,7 +8897,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_rorv_epi64() { + const unsafe fn test_mm_rorv_epi64() { let a = _mm_set_epi64x(1 << 32, 1 << 0); let b = _mm_set_epi64x(0, 1); let r = _mm_rorv_epi64(a, b); @@ -8901,7 +8906,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_rorv_epi64() { + const unsafe fn test_mm_mask_rorv_epi64() { let a = _mm_set_epi64x(1 << 32, 1 << 0); let b = _mm_set_epi64x(0, 1); let r = _mm_mask_rorv_epi64(a, 0, a, b); @@ -8912,7 +8917,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_rorv_epi64() { + const unsafe fn test_mm_maskz_rorv_epi64() { let a = _mm_set_epi64x(1 << 32, 1 << 0); let b = _mm_set_epi64x(0, 1); let r = _mm_maskz_rorv_epi64(0, a, b); @@ -8923,7 +8928,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_sllv_epi64() { + const unsafe fn test_mm512_sllv_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 32, 1 << 63, 1 << 32, 1 << 32, @@ -8940,7 +8945,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_sllv_epi64() { + const unsafe fn test_mm512_mask_sllv_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 32, 1 << 32, 1 << 63, 1 << 32, @@ -8959,7 +8964,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_sllv_epi64() { + const unsafe fn test_mm512_maskz_sllv_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 32, 1 << 32, 1 << 32, 1 << 32, @@ -8974,7 +8979,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_sllv_epi64() { + const unsafe fn test_mm256_mask_sllv_epi64() { let a = _mm256_set_epi64x(1 << 32, 1 << 32, 1 << 63, 1 << 32); let count = _mm256_set_epi64x(0, 1, 2, 3); let r = _mm256_mask_sllv_epi64(a, 0, a, count); @@ -8985,7 +8990,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_sllv_epi64() { + const unsafe fn test_mm256_maskz_sllv_epi64() { let a = _mm256_set_epi64x(1 << 32, 1 << 32, 1 << 63, 1 << 32); let count = 
_mm256_set_epi64x(0, 1, 2, 3); let r = _mm256_maskz_sllv_epi64(0, a, count); @@ -8996,7 +9001,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_sllv_epi64() { + const unsafe fn test_mm_mask_sllv_epi64() { let a = _mm_set_epi64x(1 << 63, 1 << 32); let count = _mm_set_epi64x(2, 3); let r = _mm_mask_sllv_epi64(a, 0, a, count); @@ -9007,7 +9012,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_sllv_epi64() { + const unsafe fn test_mm_maskz_sllv_epi64() { let a = _mm_set_epi64x(1 << 63, 1 << 32); let count = _mm_set_epi64x(2, 3); let r = _mm_maskz_sllv_epi64(0, a, count); @@ -9018,7 +9023,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_srlv_epi64() { + const unsafe fn test_mm512_srlv_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 32, 1 << 0, 1 << 32, 1 << 32, @@ -9035,7 +9040,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_srlv_epi64() { + const unsafe fn test_mm512_mask_srlv_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 32, 1 << 0, 1 << 32, 1 << 32, @@ -9054,7 +9059,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_srlv_epi64() { + const unsafe fn test_mm512_maskz_srlv_epi64() { #[rustfmt::skip] let a = _mm512_set_epi64( 1 << 32, 1 << 32, 1 << 32, 1 << 32, @@ -9069,7 +9074,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_srlv_epi64() { + const unsafe fn test_mm256_mask_srlv_epi64() { let a = _mm256_set_epi64x(1 << 5, 0, 0, 0); let count = _mm256_set1_epi64x(1); let r = _mm256_mask_srlv_epi64(a, 0, a, count); @@ -9080,7 +9085,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_srlv_epi64() { + const unsafe fn test_mm256_maskz_srlv_epi64() { let a = _mm256_set_epi64x(1 << 5, 0, 0, 0); let count = _mm256_set1_epi64x(1); let r = _mm256_maskz_srlv_epi64(0, a, count); @@ -9091,7 +9096,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_srlv_epi64() { + const unsafe fn test_mm_mask_srlv_epi64() { let a = _mm_set_epi64x(1 << 5, 0); let count = _mm_set1_epi64x(1); let r = _mm_mask_srlv_epi64(a, 0, a, count); @@ -9102,7 +9107,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_srlv_epi64() { + const unsafe fn test_mm_maskz_srlv_epi64() { let a = _mm_set_epi64x(1 << 5, 0); let count = _mm_set1_epi64x(1); let r = _mm_maskz_srlv_epi64(0, a, count); @@ -9399,7 +9404,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_srav_epi64() { + const unsafe fn test_mm512_srav_epi64() { let a = _mm512_set_epi64(1, -8, 0, 0, 0, 0, 15, -16); let count = _mm512_set_epi64(2, 2, 0, 0, 0, 0, 2, 1); let r = _mm512_srav_epi64(a, count); @@ -9408,7 +9413,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_srav_epi64() { + const unsafe fn test_mm512_mask_srav_epi64() { let a = _mm512_set_epi64(1, -8, 0, 0, 0, 0, 15, -16); let count = _mm512_set_epi64(2, 2, 0, 0, 0, 0, 2, 1); let r = _mm512_mask_srav_epi64(a, 0, a, count); @@ -9419,7 +9424,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_srav_epi64() { + const unsafe fn test_mm512_maskz_srav_epi64() { let a = _mm512_set_epi64(1, -8, 0, 0, 0, 0, 15, -16); let count = _mm512_set_epi64(2, 2, 0, 0, 0, 0, 2, 1); let r = _mm512_maskz_srav_epi64(0, a, count); @@ -9430,7 +9435,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_srav_epi64() { + 
const unsafe fn test_mm256_srav_epi64() { let a = _mm256_set_epi64x(1 << 5, 0, 0, 0); let count = _mm256_set1_epi64x(1); let r = _mm256_srav_epi64(a, count); @@ -9439,7 +9444,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_srav_epi64() { + const unsafe fn test_mm256_mask_srav_epi64() { let a = _mm256_set_epi64x(1 << 5, 0, 0, 0); let count = _mm256_set1_epi64x(1); let r = _mm256_mask_srav_epi64(a, 0, a, count); @@ -9450,7 +9455,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_srav_epi64() { + const unsafe fn test_mm256_maskz_srav_epi64() { let a = _mm256_set_epi64x(1 << 5, 0, 0, 0); let count = _mm256_set1_epi64x(1); let r = _mm256_maskz_srav_epi64(0, a, count); @@ -9461,7 +9466,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_srav_epi64() { + const unsafe fn test_mm_srav_epi64() { let a = _mm_set_epi64x(1 << 5, 0); let count = _mm_set1_epi64x(1); let r = _mm_srav_epi64(a, count); @@ -9470,7 +9475,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_srav_epi64() { + const unsafe fn test_mm_mask_srav_epi64() { let a = _mm_set_epi64x(1 << 5, 0); let count = _mm_set1_epi64x(1); let r = _mm_mask_srav_epi64(a, 0, a, count); @@ -9481,7 +9486,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_srav_epi64() { + const unsafe fn test_mm_maskz_srav_epi64() { let a = _mm_set_epi64x(1 << 5, 0); let count = _mm_set1_epi64x(1); let r = _mm_maskz_srav_epi64(0, a, count); @@ -9492,7 +9497,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_srai_epi64() { + const unsafe fn test_mm512_srai_epi64() { let a = _mm512_set_epi64(1, -4, 15, 0, 0, 0, 0, -16); let r = _mm512_srai_epi64::<2>(a); let e = _mm512_set_epi64(0, -1, 3, 0, 0, 0, 0, -4); @@ -9500,7 +9505,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_srai_epi64() { + const unsafe fn test_mm512_mask_srai_epi64() { let a = _mm512_set_epi64(1, -4, 15, 0, 0, 0, 0, -16); let r = _mm512_mask_srai_epi64::<2>(a, 0, a); assert_eq_m512i(r, a); @@ -9510,7 +9515,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_srai_epi64() { + const unsafe fn test_mm512_maskz_srai_epi64() { let a = _mm512_set_epi64(1, -4, 15, 0, 0, 0, 0, -16); let r = _mm512_maskz_srai_epi64::<2>(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -9520,7 +9525,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_srai_epi64() { + const unsafe fn test_mm256_srai_epi64() { let a = _mm256_set_epi64x(1 << 5, 0, 0, 0); let r = _mm256_srai_epi64::<1>(a); let e = _mm256_set_epi64x(1 << 4, 0, 0, 0); @@ -9528,7 +9533,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_srai_epi64() { + const unsafe fn test_mm256_mask_srai_epi64() { let a = _mm256_set_epi64x(1 << 5, 0, 0, 0); let r = _mm256_mask_srai_epi64::<1>(a, 0, a); assert_eq_m256i(r, a); @@ -9538,7 +9543,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_srai_epi64() { + const unsafe fn test_mm256_maskz_srai_epi64() { let a = _mm256_set_epi64x(1 << 5, 0, 0, 0); let r = _mm256_maskz_srai_epi64::<1>(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -9548,7 +9553,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_srai_epi64() { + const unsafe fn test_mm_srai_epi64() { let a = _mm_set_epi64x(1 << 5, 0); let r = _mm_srai_epi64::<1>(a); let e = 
_mm_set_epi64x(1 << 4, 0); @@ -9556,7 +9561,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_srai_epi64() { + const unsafe fn test_mm_mask_srai_epi64() { let a = _mm_set_epi64x(1 << 5, 0); let r = _mm_mask_srai_epi64::<1>(a, 0, a); assert_eq_m128i(r, a); @@ -9566,7 +9571,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_srai_epi64() { + const unsafe fn test_mm_maskz_srai_epi64() { let a = _mm_set_epi64x(1 << 5, 0); let r = _mm_maskz_srai_epi64::<1>(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -9576,7 +9581,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_permute_pd() { + const unsafe fn test_mm512_permute_pd() { let a = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let r = _mm512_permute_pd::<0b11_11_11_11>(a); let e = _mm512_setr_pd(1., 1., 3., 3., 5., 5., 7., 7.); @@ -9584,7 +9589,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_permute_pd() { + const unsafe fn test_mm512_mask_permute_pd() { let a = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let r = _mm512_mask_permute_pd::<0b11_11_11_11>(a, 0, a); assert_eq_m512d(r, a); @@ -9594,7 +9599,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_permute_pd() { + const unsafe fn test_mm512_maskz_permute_pd() { let a = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let r = _mm512_maskz_permute_pd::<0b11_11_11_11>(0, a); assert_eq_m512d(r, _mm512_setzero_pd()); @@ -9604,7 +9609,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_permute_pd() { + const unsafe fn test_mm256_mask_permute_pd() { let a = _mm256_set_pd(3., 2., 1., 0.); let r = _mm256_mask_permute_pd::<0b11_11>(a, 0, a); assert_eq_m256d(r, a); @@ -9614,7 +9619,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_permute_pd() { + const unsafe fn test_mm256_maskz_permute_pd() { let a = _mm256_set_pd(3., 2., 1., 0.); let r = _mm256_maskz_permute_pd::<0b11_11>(0, a); assert_eq_m256d(r, _mm256_setzero_pd()); @@ -9624,7 +9629,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_permute_pd() { + const unsafe fn test_mm_mask_permute_pd() { let a = _mm_set_pd(1., 0.); let r = _mm_mask_permute_pd::<0b11>(a, 0, a); assert_eq_m128d(r, a); @@ -9634,7 +9639,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_permute_pd() { + const unsafe fn test_mm_maskz_permute_pd() { let a = _mm_set_pd(1., 0.); let r = _mm_maskz_permute_pd::<0b11>(0, a); assert_eq_m128d(r, _mm_setzero_pd()); @@ -9644,7 +9649,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_permutex_epi64() { + const unsafe fn test_mm512_permutex_epi64() { let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7); let r = _mm512_permutex_epi64::<0b11_11_11_11>(a); let e = _mm512_setr_epi64(3, 3, 3, 3, 7, 7, 7, 7); @@ -9652,7 +9657,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_permutex_epi64() { + const unsafe fn test_mm512_mask_permutex_epi64() { let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7); let r = _mm512_mask_permutex_epi64::<0b11_11_11_11>(a, 0, a); assert_eq_m512i(r, a); @@ -9662,7 +9667,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_permutex_epi64() { + const unsafe fn test_mm512_maskz_permutex_epi64() { let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7); let r = _mm512_maskz_permutex_epi64::<0b11_11_11_11>(0, a); assert_eq_m512i(r, 
_mm512_setzero_si512()); @@ -9672,7 +9677,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_permutex_epi64() { + const unsafe fn test_mm256_permutex_epi64() { let a = _mm256_set_epi64x(3, 2, 1, 0); let r = _mm256_permutex_epi64::<0b11_11_11_11>(a); let e = _mm256_set_epi64x(3, 3, 3, 3); @@ -9680,7 +9685,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_permutex_epi64() { + const unsafe fn test_mm256_mask_permutex_epi64() { let a = _mm256_set_epi64x(3, 2, 1, 0); let r = _mm256_mask_permutex_epi64::<0b11_11_11_11>(a, 0, a); assert_eq_m256i(r, a); @@ -9690,7 +9695,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_permutex_epi64() { + const unsafe fn test_mm256_maskz_permutex_epi64() { let a = _mm256_set_epi64x(3, 2, 1, 0); let r = _mm256_maskz_permutex_epi64::<0b11_11_11_11>(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -9700,7 +9705,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_permutex_pd() { + const unsafe fn test_mm512_permutex_pd() { let a = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let r = _mm512_permutex_pd::<0b11_11_11_11>(a); let e = _mm512_setr_pd(3., 3., 3., 3., 7., 7., 7., 7.); @@ -9708,7 +9713,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_permutex_pd() { + const unsafe fn test_mm512_mask_permutex_pd() { let a = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let r = _mm512_mask_permutex_pd::<0b11_11_11_11>(a, 0, a); assert_eq_m512d(r, a); @@ -9718,7 +9723,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_permutex_pd() { + const unsafe fn test_mm512_maskz_permutex_pd() { let a = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.); let r = _mm512_maskz_permutex_pd::<0b11_11_11_11>(0, a); assert_eq_m512d(r, _mm512_setzero_pd()); @@ -9728,7 +9733,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_permutex_pd() { + const unsafe fn test_mm256_permutex_pd() { let a = _mm256_set_pd(0., 1., 2., 3.); let r = _mm256_permutex_pd::<0b11_11_11_11>(a); let e = _mm256_set_pd(0., 0., 0., 0.); @@ -9736,7 +9741,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_permutex_pd() { + const unsafe fn test_mm256_mask_permutex_pd() { let a = _mm256_set_pd(0., 1., 2., 3.); let r = _mm256_mask_permutex_pd::<0b11_11_11_11>(a, 0, a); assert_eq_m256d(r, a); @@ -9746,7 +9751,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_permutex_pd() { + const unsafe fn test_mm256_maskz_permutex_pd() { let a = _mm256_set_pd(0., 1., 2., 3.); let r = _mm256_maskz_permutex_pd::<0b11_11_11_11>(0, a); assert_eq_m256d(r, _mm256_setzero_pd()); @@ -10231,7 +10236,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_shuffle_pd() { + const unsafe fn test_mm256_mask_shuffle_pd() { let a = _mm256_set_pd(1., 4., 5., 8.); let b = _mm256_set_pd(2., 3., 6., 7.); let r = _mm256_mask_shuffle_pd::<0b11_11_11_11>(a, 0, a, b); @@ -10242,7 +10247,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_shuffle_pd() { + const unsafe fn test_mm256_maskz_shuffle_pd() { let a = _mm256_set_pd(1., 4., 5., 8.); let b = _mm256_set_pd(2., 3., 6., 7.); let r = _mm256_maskz_shuffle_pd::<0b11_11_11_11>(0, a, b); @@ -10253,7 +10258,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_shuffle_pd() { + const unsafe fn 
test_mm_mask_shuffle_pd() { let a = _mm_set_pd(1., 4.); let b = _mm_set_pd(2., 3.); let r = _mm_mask_shuffle_pd::<0b11_11_11_11>(a, 0, a, b); @@ -10264,7 +10269,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_shuffle_pd() { + const unsafe fn test_mm_maskz_shuffle_pd() { let a = _mm_set_pd(1., 4.); let b = _mm_set_pd(2., 3.); let r = _mm_maskz_shuffle_pd::<0b11_11_11_11>(0, a, b); @@ -10275,7 +10280,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_shuffle_i64x2() { + const unsafe fn test_mm512_shuffle_i64x2() { let a = _mm512_setr_epi64(1, 4, 5, 8, 9, 12, 13, 16); let b = _mm512_setr_epi64(2, 3, 6, 7, 10, 11, 14, 15); let r = _mm512_shuffle_i64x2::<0b00_00_00_00>(a, b); @@ -10284,7 +10289,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_shuffle_i64x2() { + const unsafe fn test_mm512_mask_shuffle_i64x2() { let a = _mm512_setr_epi64(1, 4, 5, 8, 9, 12, 13, 16); let b = _mm512_setr_epi64(2, 3, 6, 7, 10, 11, 14, 15); let r = _mm512_mask_shuffle_i64x2::<0b00_00_00_00>(a, 0, a, b); @@ -10295,7 +10300,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_shuffle_i64x2() { + const unsafe fn test_mm512_maskz_shuffle_i64x2() { let a = _mm512_setr_epi64(1, 4, 5, 8, 9, 12, 13, 16); let b = _mm512_setr_epi64(2, 3, 6, 7, 10, 11, 14, 15); let r = _mm512_maskz_shuffle_i64x2::<0b00_00_00_00>(0, a, b); @@ -10306,7 +10311,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_shuffle_i64x2() { + const unsafe fn test_mm256_shuffle_i64x2() { let a = _mm256_set_epi64x(1, 4, 5, 8); let b = _mm256_set_epi64x(2, 3, 6, 7); let r = _mm256_shuffle_i64x2::<0b00>(a, b); @@ -10315,7 +10320,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_shuffle_i64x2() { + const unsafe fn test_mm256_mask_shuffle_i64x2() { let a = _mm256_set_epi64x(1, 4, 5, 8); let b = _mm256_set_epi64x(2, 3, 6, 7); let r = _mm256_mask_shuffle_i64x2::<0b00>(a, 0, a, b); @@ -10326,7 +10331,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_shuffle_i64x2() { + const unsafe fn test_mm256_maskz_shuffle_i64x2() { let a = _mm256_set_epi64x(1, 4, 5, 8); let b = _mm256_set_epi64x(2, 3, 6, 7); let r = _mm256_maskz_shuffle_i64x2::<0b00>(0, a, b); @@ -10337,7 +10342,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_shuffle_f64x2() { + const unsafe fn test_mm512_shuffle_f64x2() { let a = _mm512_setr_pd(1., 4., 5., 8., 9., 12., 13., 16.); let b = _mm512_setr_pd(2., 3., 6., 7., 10., 11., 14., 15.); let r = _mm512_shuffle_f64x2::<0b00_00_00_00>(a, b); @@ -10346,7 +10351,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_shuffle_f64x2() { + const unsafe fn test_mm512_mask_shuffle_f64x2() { let a = _mm512_setr_pd(1., 4., 5., 8., 9., 12., 13., 16.); let b = _mm512_setr_pd(2., 3., 6., 7., 10., 11., 14., 15.); let r = _mm512_mask_shuffle_f64x2::<0b00_00_00_00>(a, 0, a, b); @@ -10357,7 +10362,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_shuffle_f64x2() { + const unsafe fn test_mm512_maskz_shuffle_f64x2() { let a = _mm512_setr_pd(1., 4., 5., 8., 9., 12., 13., 16.); let b = _mm512_setr_pd(2., 3., 6., 7., 10., 11., 14., 15.); let r = _mm512_maskz_shuffle_f64x2::<0b00_00_00_00>(0, a, b); @@ -10368,7 +10373,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_shuffle_f64x2() { + const unsafe fn test_mm256_shuffle_f64x2() { let a = 
_mm256_set_pd(1., 4., 5., 8.); let b = _mm256_set_pd(2., 3., 6., 7.); let r = _mm256_shuffle_f64x2::<0b00>(a, b); @@ -10377,7 +10382,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_shuffle_f64x2() { + const unsafe fn test_mm256_mask_shuffle_f64x2() { let a = _mm256_set_pd(1., 4., 5., 8.); let b = _mm256_set_pd(2., 3., 6., 7.); let r = _mm256_mask_shuffle_f64x2::<0b00>(a, 0, a, b); @@ -10388,7 +10393,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_shuffle_f64x2() { + const unsafe fn test_mm256_maskz_shuffle_f64x2() { let a = _mm256_set_pd(1., 4., 5., 8.); let b = _mm256_set_pd(2., 3., 6., 7.); let r = _mm256_maskz_shuffle_f64x2::<0b00>(0, a, b); @@ -10399,7 +10404,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_movedup_pd() { + const unsafe fn test_mm512_movedup_pd() { let a = _mm512_setr_pd(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm512_movedup_pd(a); let e = _mm512_setr_pd(1., 1., 3., 3., 5., 5., 7., 7.); @@ -10407,7 +10412,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_movedup_pd() { + const unsafe fn test_mm512_mask_movedup_pd() { let a = _mm512_setr_pd(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm512_mask_movedup_pd(a, 0, a); assert_eq_m512d(r, a); @@ -10417,7 +10422,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_movedup_pd() { + const unsafe fn test_mm512_maskz_movedup_pd() { let a = _mm512_setr_pd(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm512_maskz_movedup_pd(0, a); assert_eq_m512d(r, _mm512_setzero_pd()); @@ -10427,7 +10432,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_movedup_pd() { + const unsafe fn test_mm256_mask_movedup_pd() { let a = _mm256_set_pd(1., 2., 3., 4.); let r = _mm256_mask_movedup_pd(a, 0, a); assert_eq_m256d(r, a); @@ -10437,7 +10442,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_movedup_pd() { + const unsafe fn test_mm256_maskz_movedup_pd() { let a = _mm256_set_pd(1., 2., 3., 4.); let r = _mm256_maskz_movedup_pd(0, a); assert_eq_m256d(r, _mm256_setzero_pd()); @@ -10447,7 +10452,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_movedup_pd() { + const unsafe fn test_mm_mask_movedup_pd() { let a = _mm_set_pd(1., 2.); let r = _mm_mask_movedup_pd(a, 0, a); assert_eq_m128d(r, a); @@ -10457,7 +10462,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_movedup_pd() { + const unsafe fn test_mm_maskz_movedup_pd() { let a = _mm_set_pd(1., 2.); let r = _mm_maskz_movedup_pd(0, a); assert_eq_m128d(r, _mm_setzero_pd()); @@ -10467,7 +10472,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_inserti64x4() { + const unsafe fn test_mm512_inserti64x4() { let a = _mm512_setr_epi64(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm256_setr_epi64x(17, 18, 19, 20); let r = _mm512_inserti64x4::<1>(a, b); @@ -10476,7 +10481,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_inserti64x4() { + const unsafe fn test_mm512_mask_inserti64x4() { let a = _mm512_setr_epi64(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm256_setr_epi64x(17, 18, 19, 20); let r = _mm512_mask_inserti64x4::<1>(a, 0, a, b); @@ -10487,7 +10492,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_inserti64x4() { + const unsafe fn test_mm512_maskz_inserti64x4() { let a = _mm512_setr_epi64(1, 2, 3, 4, 5, 6, 7, 8); let b = 
_mm256_setr_epi64x(17, 18, 19, 20); let r = _mm512_maskz_inserti64x4::<1>(0, a, b); @@ -10498,7 +10503,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_insertf64x4() { + const unsafe fn test_mm512_insertf64x4() { let a = _mm512_setr_pd(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm256_setr_pd(17., 18., 19., 20.); let r = _mm512_insertf64x4::<1>(a, b); @@ -10507,7 +10512,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_insertf64x4() { + const unsafe fn test_mm512_mask_insertf64x4() { let a = _mm512_setr_pd(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm256_setr_pd(17., 18., 19., 20.); let r = _mm512_mask_insertf64x4::<1>(a, 0, a, b); @@ -10518,7 +10523,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_insertf64x4() { + const unsafe fn test_mm512_maskz_insertf64x4() { let a = _mm512_setr_pd(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm256_setr_pd(17., 18., 19., 20.); let r = _mm512_maskz_insertf64x4::<1>(0, a, b); @@ -10529,21 +10534,21 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_castpd128_pd512() { + const unsafe fn test_mm512_castpd128_pd512() { let a = _mm_setr_pd(17., 18.); let r = _mm512_castpd128_pd512(a); assert_eq_m128d(_mm512_castpd512_pd128(r), a); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_castpd256_pd512() { + const unsafe fn test_mm512_castpd256_pd512() { let a = _mm256_setr_pd(17., 18., 19., 20.); let r = _mm512_castpd256_pd512(a); assert_eq_m256d(_mm512_castpd512_pd256(r), a); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_zextpd128_pd512() { + const unsafe fn test_mm512_zextpd128_pd512() { let a = _mm_setr_pd(17., 18.); let r = _mm512_zextpd128_pd512(a); let e = _mm512_setr_pd(17., 18., 0., 0., 0., 0., 0., 0.); @@ -10551,7 +10556,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_zextpd256_pd512() { + const unsafe fn test_mm512_zextpd256_pd512() { let a = _mm256_setr_pd(17., 18., 19., 20.); let r = _mm512_zextpd256_pd512(a); let e = _mm512_setr_pd(17., 18., 19., 20., 0., 0., 0., 0.); @@ -10559,7 +10564,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_castpd512_pd128() { + const unsafe fn test_mm512_castpd512_pd128() { let a = _mm512_setr_pd(17., 18., -1., -1., -1., -1., -1., -1.); let r = _mm512_castpd512_pd128(a); let e = _mm_setr_pd(17., 18.); @@ -10567,7 +10572,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_castpd512_pd256() { + const unsafe fn test_mm512_castpd512_pd256() { let a = _mm512_setr_pd(17., 18., 19., 20., -1., -1., -1., -1.); let r = _mm512_castpd512_pd256(a); let e = _mm256_setr_pd(17., 18., 19., 20.); @@ -10575,7 +10580,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_castpd_ps() { + const unsafe fn test_mm512_castpd_ps() { let a = _mm512_set1_pd(1.); let r = _mm512_castpd_ps(a); let e = _mm512_set_ps( @@ -10586,7 +10591,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_castpd_si512() { + const unsafe fn test_mm512_castpd_si512() { let a = _mm512_set1_pd(1.); let r = _mm512_castpd_si512(a); let e = _mm512_set_epi32( @@ -10597,21 +10602,21 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_castsi128_si512() { + const unsafe fn test_mm512_castsi128_si512() { let a = _mm_setr_epi64x(17, 18); let r = _mm512_castsi128_si512(a); assert_eq_m128i(_mm512_castsi512_si128(r), a); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_castsi256_si512() { + const unsafe fn 
test_mm512_castsi256_si512() { let a = _mm256_setr_epi64x(17, 18, 19, 20); let r = _mm512_castsi256_si512(a); assert_eq_m256i(_mm512_castsi512_si256(r), a); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_zextsi128_si512() { + const unsafe fn test_mm512_zextsi128_si512() { let a = _mm_setr_epi64x(17, 18); let r = _mm512_zextsi128_si512(a); let e = _mm512_setr_epi64(17, 18, 0, 0, 0, 0, 0, 0); @@ -10619,7 +10624,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_zextsi256_si512() { + const unsafe fn test_mm512_zextsi256_si512() { let a = _mm256_setr_epi64x(17, 18, 19, 20); let r = _mm512_zextsi256_si512(a); let e = _mm512_setr_epi64(17, 18, 19, 20, 0, 0, 0, 0); @@ -10627,7 +10632,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_castsi512_si128() { + const unsafe fn test_mm512_castsi512_si128() { let a = _mm512_setr_epi64(17, 18, -1, -1, -1, -1, -1, -1); let r = _mm512_castsi512_si128(a); let e = _mm_setr_epi64x(17, 18); @@ -10635,7 +10640,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_castsi512_si256() { + const unsafe fn test_mm512_castsi512_si256() { let a = _mm512_setr_epi64(17, 18, 19, 20, -1, -1, -1, -1); let r = _mm512_castsi512_si256(a); let e = _mm256_setr_epi64x(17, 18, 19, 20); @@ -10643,7 +10648,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_castsi512_ps() { + const unsafe fn test_mm512_castsi512_ps() { let a = _mm512_set1_epi64(1 << 62); let r = _mm512_castsi512_ps(a); let e = _mm512_set_ps( @@ -10653,7 +10658,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_castsi512_pd() { + const unsafe fn test_mm512_castsi512_pd() { let a = _mm512_set1_epi64(1 << 62); let r = _mm512_castsi512_pd(a); let e = _mm512_set_pd(2., 2., 2., 2., 2., 2., 2., 2.); @@ -10661,7 +10666,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_broadcastq_epi64() { + const unsafe fn test_mm512_broadcastq_epi64() { let a = _mm_setr_epi64x(17, 18); let r = _mm512_broadcastq_epi64(a); let e = _mm512_set1_epi64(17); @@ -10669,7 +10674,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_broadcastq_epi64() { + const unsafe fn test_mm512_mask_broadcastq_epi64() { let src = _mm512_set1_epi64(18); let a = _mm_setr_epi64x(17, 18); let r = _mm512_mask_broadcastq_epi64(src, 0, a); @@ -10680,7 +10685,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_broadcastq_epi64() { + const unsafe fn test_mm512_maskz_broadcastq_epi64() { let a = _mm_setr_epi64x(17, 18); let r = _mm512_maskz_broadcastq_epi64(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -10690,7 +10695,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_broadcastq_epi64() { + const unsafe fn test_mm256_mask_broadcastq_epi64() { let src = _mm256_set1_epi64x(18); let a = _mm_set_epi64x(17, 18); let r = _mm256_mask_broadcastq_epi64(src, 0, a); @@ -10701,7 +10706,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_broadcastq_epi64() { + const unsafe fn test_mm256_maskz_broadcastq_epi64() { let a = _mm_set_epi64x(17, 18); let r = _mm256_maskz_broadcastq_epi64(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -10711,7 +10716,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_broadcastq_epi64() { + const unsafe fn test_mm_mask_broadcastq_epi64() { let src = _mm_set1_epi64x(18); let a = _mm_set_epi64x(17, 18); let r = 
_mm_mask_broadcastq_epi64(src, 0, a); @@ -10722,7 +10727,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_broadcastq_epi64() { + const unsafe fn test_mm_maskz_broadcastq_epi64() { let a = _mm_set_epi64x(17, 18); let r = _mm_maskz_broadcastq_epi64(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -10732,7 +10737,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_broadcastsd_pd() { + const unsafe fn test_mm512_broadcastsd_pd() { let a = _mm_set_pd(17., 18.); let r = _mm512_broadcastsd_pd(a); let e = _mm512_set1_pd(18.); @@ -10740,7 +10745,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_broadcastsd_pd() { + const unsafe fn test_mm512_mask_broadcastsd_pd() { let src = _mm512_set1_pd(18.); let a = _mm_set_pd(17., 18.); let r = _mm512_mask_broadcastsd_pd(src, 0, a); @@ -10751,7 +10756,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_broadcastsd_pd() { + const unsafe fn test_mm512_maskz_broadcastsd_pd() { let a = _mm_set_pd(17., 18.); let r = _mm512_maskz_broadcastsd_pd(0, a); assert_eq_m512d(r, _mm512_setzero_pd()); @@ -10761,7 +10766,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_broadcastsd_pd() { + const unsafe fn test_mm256_mask_broadcastsd_pd() { let src = _mm256_set1_pd(18.); let a = _mm_set_pd(17., 18.); let r = _mm256_mask_broadcastsd_pd(src, 0, a); @@ -10772,7 +10777,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_broadcastsd_pd() { + const unsafe fn test_mm256_maskz_broadcastsd_pd() { let a = _mm_set_pd(17., 18.); let r = _mm256_maskz_broadcastsd_pd(0, a); assert_eq_m256d(r, _mm256_setzero_pd()); @@ -10782,7 +10787,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_broadcast_i64x4() { + const unsafe fn test_mm512_broadcast_i64x4() { let a = _mm256_set_epi64x(17, 18, 19, 20); let r = _mm512_broadcast_i64x4(a); let e = _mm512_set_epi64(17, 18, 19, 20, 17, 18, 19, 20); @@ -10790,7 +10795,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_broadcast_i64x4() { + const unsafe fn test_mm512_mask_broadcast_i64x4() { let src = _mm512_set1_epi64(18); let a = _mm256_set_epi64x(17, 18, 19, 20); let r = _mm512_mask_broadcast_i64x4(src, 0, a); @@ -10801,7 +10806,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_broadcast_i64x4() { + const unsafe fn test_mm512_maskz_broadcast_i64x4() { let a = _mm256_set_epi64x(17, 18, 19, 20); let r = _mm512_maskz_broadcast_i64x4(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -10811,7 +10816,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_broadcast_f64x4() { + const unsafe fn test_mm512_broadcast_f64x4() { let a = _mm256_set_pd(17., 18., 19., 20.); let r = _mm512_broadcast_f64x4(a); let e = _mm512_set_pd(17., 18., 19., 20., 17., 18., 19., 20.); @@ -10819,7 +10824,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_broadcast_f64x4() { + const unsafe fn test_mm512_mask_broadcast_f64x4() { let src = _mm512_set1_pd(18.); let a = _mm256_set_pd(17., 18., 19., 20.); let r = _mm512_mask_broadcast_f64x4(src, 0, a); @@ -10830,7 +10835,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_broadcast_f64x4() { + const unsafe fn test_mm512_maskz_broadcast_f64x4() { let a = _mm256_set_pd(17., 18., 19., 20.); let r = _mm512_maskz_broadcast_f64x4(0, a); assert_eq_m512d(r, _mm512_setzero_pd()); @@ -10840,7 
+10845,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_blend_epi64() { + const unsafe fn test_mm512_mask_blend_epi64() { let a = _mm512_set1_epi64(1); let b = _mm512_set1_epi64(2); let r = _mm512_mask_blend_epi64(0b11110000, a, b); @@ -10849,7 +10854,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_blend_epi64() { + const unsafe fn test_mm256_mask_blend_epi64() { let a = _mm256_set1_epi64x(1); let b = _mm256_set1_epi64x(2); let r = _mm256_mask_blend_epi64(0b00001111, a, b); @@ -10858,7 +10863,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_blend_epi64() { + const unsafe fn test_mm_mask_blend_epi64() { let a = _mm_set1_epi64x(1); let b = _mm_set1_epi64x(2); let r = _mm_mask_blend_epi64(0b00000011, a, b); @@ -10867,7 +10872,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_blend_pd() { + const unsafe fn test_mm512_mask_blend_pd() { let a = _mm512_set1_pd(1.); let b = _mm512_set1_pd(2.); let r = _mm512_mask_blend_pd(0b11110000, a, b); @@ -10876,7 +10881,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_blend_pd() { + const unsafe fn test_mm256_mask_blend_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set1_pd(2.); let r = _mm256_mask_blend_pd(0b00001111, a, b); @@ -10885,7 +10890,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_blend_pd() { + const unsafe fn test_mm_mask_blend_pd() { let a = _mm_set1_pd(1.); let b = _mm_set1_pd(2.); let r = _mm_mask_blend_pd(0b00000011, a, b); @@ -10894,7 +10899,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_unpackhi_epi64() { + const unsafe fn test_mm512_unpackhi_epi64() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm512_set_epi64(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm512_unpackhi_epi64(a, b); @@ -10903,7 +10908,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_unpackhi_epi64() { + const unsafe fn test_mm512_mask_unpackhi_epi64() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm512_set_epi64(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm512_mask_unpackhi_epi64(a, 0, a, b); @@ -10914,7 +10919,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_unpackhi_epi64() { + const unsafe fn test_mm512_maskz_unpackhi_epi64() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm512_set_epi64(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm512_maskz_unpackhi_epi64(0, a, b); @@ -10925,7 +10930,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_unpackhi_epi64() { + const unsafe fn test_mm256_mask_unpackhi_epi64() { let a = _mm256_set_epi64x(1, 2, 3, 4); let b = _mm256_set_epi64x(17, 18, 19, 20); let r = _mm256_mask_unpackhi_epi64(a, 0, a, b); @@ -10936,7 +10941,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_unpackhi_epi64() { + const unsafe fn test_mm256_maskz_unpackhi_epi64() { let a = _mm256_set_epi64x(1, 2, 3, 4); let b = _mm256_set_epi64x(17, 18, 19, 20); let r = _mm256_maskz_unpackhi_epi64(0, a, b); @@ -10947,7 +10952,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_unpackhi_epi64() { + const unsafe fn test_mm_mask_unpackhi_epi64() { let a = _mm_set_epi64x(1, 2); let b = _mm_set_epi64x(17, 18); let r = _mm_mask_unpackhi_epi64(a, 0, a, b); @@ -10958,7 +10963,7 @@ mod tests { } #[simd_test(enable = 
"avx512f,avx512vl")] - unsafe fn test_mm_maskz_unpackhi_epi64() { + const unsafe fn test_mm_maskz_unpackhi_epi64() { let a = _mm_set_epi64x(1, 2); let b = _mm_set_epi64x(17, 18); let r = _mm_maskz_unpackhi_epi64(0, a, b); @@ -10969,7 +10974,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_unpackhi_pd() { + const unsafe fn test_mm512_unpackhi_pd() { let a = _mm512_set_pd(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm512_set_pd(17., 18., 19., 20., 21., 22., 23., 24.); let r = _mm512_unpackhi_pd(a, b); @@ -10978,7 +10983,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_unpackhi_pd() { + const unsafe fn test_mm512_mask_unpackhi_pd() { let a = _mm512_set_pd(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm512_set_pd(17., 18., 19., 20., 21., 22., 23., 24.); let r = _mm512_mask_unpackhi_pd(a, 0, a, b); @@ -10989,7 +10994,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_unpackhi_pd() { + const unsafe fn test_mm512_maskz_unpackhi_pd() { let a = _mm512_set_pd(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm512_set_pd(17., 18., 19., 20., 21., 22., 23., 24.); let r = _mm512_maskz_unpackhi_pd(0, a, b); @@ -11000,7 +11005,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_unpackhi_pd() { + const unsafe fn test_mm256_mask_unpackhi_pd() { let a = _mm256_set_pd(1., 2., 3., 4.); let b = _mm256_set_pd(17., 18., 19., 20.); let r = _mm256_mask_unpackhi_pd(a, 0, a, b); @@ -11011,7 +11016,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_unpackhi_pd() { + const unsafe fn test_mm256_maskz_unpackhi_pd() { let a = _mm256_set_pd(1., 2., 3., 4.); let b = _mm256_set_pd(17., 18., 19., 20.); let r = _mm256_maskz_unpackhi_pd(0, a, b); @@ -11022,7 +11027,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_unpackhi_pd() { + const unsafe fn test_mm_mask_unpackhi_pd() { let a = _mm_set_pd(1., 2.); let b = _mm_set_pd(17., 18.); let r = _mm_mask_unpackhi_pd(a, 0, a, b); @@ -11033,7 +11038,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_unpackhi_pd() { + const unsafe fn test_mm_maskz_unpackhi_pd() { let a = _mm_set_pd(1., 2.); let b = _mm_set_pd(17., 18.); let r = _mm_maskz_unpackhi_pd(0, a, b); @@ -11044,7 +11049,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_unpacklo_epi64() { + const unsafe fn test_mm512_unpacklo_epi64() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm512_set_epi64(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm512_unpacklo_epi64(a, b); @@ -11053,7 +11058,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_unpacklo_epi64() { + const unsafe fn test_mm512_mask_unpacklo_epi64() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm512_set_epi64(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm512_mask_unpacklo_epi64(a, 0, a, b); @@ -11064,7 +11069,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_unpacklo_epi64() { + const unsafe fn test_mm512_maskz_unpacklo_epi64() { let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); let b = _mm512_set_epi64(17, 18, 19, 20, 21, 22, 23, 24); let r = _mm512_maskz_unpacklo_epi64(0, a, b); @@ -11075,7 +11080,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_unpacklo_epi64() { + const unsafe fn test_mm256_mask_unpacklo_epi64() { let a = _mm256_set_epi64x(1, 2, 3, 4); let b = _mm256_set_epi64x(17, 18, 19, 20); let r = 
_mm256_mask_unpacklo_epi64(a, 0, a, b); @@ -11086,7 +11091,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_unpacklo_epi64() { + const unsafe fn test_mm256_maskz_unpacklo_epi64() { let a = _mm256_set_epi64x(1, 2, 3, 4); let b = _mm256_set_epi64x(17, 18, 19, 20); let r = _mm256_maskz_unpacklo_epi64(0, a, b); @@ -11097,7 +11102,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_unpacklo_epi64() { + const unsafe fn test_mm_mask_unpacklo_epi64() { let a = _mm_set_epi64x(1, 2); let b = _mm_set_epi64x(17, 18); let r = _mm_mask_unpacklo_epi64(a, 0, a, b); @@ -11108,7 +11113,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_unpacklo_epi64() { + const unsafe fn test_mm_maskz_unpacklo_epi64() { let a = _mm_set_epi64x(1, 2); let b = _mm_set_epi64x(17, 18); let r = _mm_maskz_unpacklo_epi64(0, a, b); @@ -11119,7 +11124,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_unpacklo_pd() { + const unsafe fn test_mm512_unpacklo_pd() { let a = _mm512_set_pd(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm512_set_pd(17., 18., 19., 20., 21., 22., 23., 24.); let r = _mm512_unpacklo_pd(a, b); @@ -11128,7 +11133,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_unpacklo_pd() { + const unsafe fn test_mm512_mask_unpacklo_pd() { let a = _mm512_set_pd(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm512_set_pd(17., 18., 19., 20., 21., 22., 23., 24.); let r = _mm512_mask_unpacklo_pd(a, 0, a, b); @@ -11139,7 +11144,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_unpacklo_pd() { + const unsafe fn test_mm512_maskz_unpacklo_pd() { let a = _mm512_set_pd(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm512_set_pd(17., 18., 19., 20., 21., 22., 23., 24.); let r = _mm512_maskz_unpacklo_pd(0, a, b); @@ -11150,7 +11155,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_unpacklo_pd() { + const unsafe fn test_mm256_mask_unpacklo_pd() { let a = _mm256_set_pd(1., 2., 3., 4.); let b = _mm256_set_pd(17., 18., 19., 20.); let r = _mm256_mask_unpacklo_pd(a, 0, a, b); @@ -11161,7 +11166,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_unpacklo_pd() { + const unsafe fn test_mm256_maskz_unpacklo_pd() { let a = _mm256_set_pd(1., 2., 3., 4.); let b = _mm256_set_pd(17., 18., 19., 20.); let r = _mm256_maskz_unpacklo_pd(0, a, b); @@ -11172,7 +11177,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_unpacklo_pd() { + const unsafe fn test_mm_mask_unpacklo_pd() { let a = _mm_set_pd(1., 2.); let b = _mm_set_pd(17., 18.); let r = _mm_mask_unpacklo_pd(a, 0, a, b); @@ -11183,7 +11188,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_unpacklo_pd() { + const unsafe fn test_mm_maskz_unpacklo_pd() { let a = _mm_set_pd(1., 2.); let b = _mm_set_pd(17., 18.); let r = _mm_maskz_unpacklo_pd(0, a, b); @@ -11194,7 +11199,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_alignr_epi64() { + const unsafe fn test_mm512_alignr_epi64() { let a = _mm512_set_epi64(8, 7, 6, 5, 4, 3, 2, 1); let b = _mm512_set_epi64(16, 15, 14, 13, 12, 11, 10, 9); let r = _mm512_alignr_epi64::<0>(a, b); @@ -11207,7 +11212,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_alignr_epi64() { + const unsafe fn test_mm512_mask_alignr_epi64() { let a = _mm512_set_epi64(8, 7, 6, 5, 4, 3, 2, 1); let b = 
_mm512_set_epi64(16, 15, 14, 13, 12, 11, 10, 9); let r = _mm512_mask_alignr_epi64::<1>(a, 0, a, b); @@ -11218,7 +11223,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_alignr_epi64() { + const unsafe fn test_mm512_maskz_alignr_epi64() { let a = _mm512_set_epi64(8, 7, 6, 5, 4, 3, 2, 1); let b = _mm512_set_epi64(16, 15, 14, 13, 12, 11, 10, 9); let r = _mm512_maskz_alignr_epi64::<1>(0, a, b); @@ -11229,7 +11234,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_alignr_epi64() { + const unsafe fn test_mm256_alignr_epi64() { let a = _mm256_set_epi64x(4, 3, 2, 1); let b = _mm256_set_epi64x(8, 7, 6, 5); let r = _mm256_alignr_epi64::<0>(a, b); @@ -11244,7 +11249,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_alignr_epi64() { + const unsafe fn test_mm256_mask_alignr_epi64() { let a = _mm256_set_epi64x(4, 3, 2, 1); let b = _mm256_set_epi64x(8, 7, 6, 5); let r = _mm256_mask_alignr_epi64::<1>(a, 0, a, b); @@ -11255,7 +11260,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_alignr_epi64() { + const unsafe fn test_mm256_maskz_alignr_epi64() { let a = _mm256_set_epi64x(4, 3, 2, 1); let b = _mm256_set_epi64x(8, 7, 6, 5); let r = _mm256_maskz_alignr_epi64::<1>(0, a, b); @@ -11266,7 +11271,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_alignr_epi64() { + const unsafe fn test_mm_alignr_epi64() { let a = _mm_set_epi64x(2, 1); let b = _mm_set_epi64x(4, 3); let r = _mm_alignr_epi64::<0>(a, b); @@ -11275,7 +11280,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_alignr_epi64() { + const unsafe fn test_mm_mask_alignr_epi64() { let a = _mm_set_epi64x(2, 1); let b = _mm_set_epi64x(4, 3); let r = _mm_mask_alignr_epi64::<1>(a, 0, a, b); @@ -11286,7 +11291,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_alignr_epi64() { + const unsafe fn test_mm_maskz_alignr_epi64() { let a = _mm_set_epi64x(2, 1); let b = _mm_set_epi64x(4, 3); let r = _mm_maskz_alignr_epi64::<1>(0, a, b); @@ -11297,7 +11302,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_and_epi64() { + const unsafe fn test_mm512_and_epi64() { let a = _mm512_set_epi64(1 << 0 | 1 << 15, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let b = _mm512_set_epi64(1 << 13, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let r = _mm512_and_epi64(a, b); @@ -11306,7 +11311,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_and_epi64() { + const unsafe fn test_mm512_mask_and_epi64() { let a = _mm512_set_epi64(1 << 0 | 1 << 15, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let b = _mm512_set_epi64(1 << 13, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let r = _mm512_mask_and_epi64(a, 0, a, b); @@ -11317,7 +11322,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_and_epi64() { + const unsafe fn test_mm512_maskz_and_epi64() { let a = _mm512_set_epi64(1 << 0 | 1 << 15, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let b = _mm512_set_epi64(1 << 13, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let r = _mm512_maskz_and_epi64(0, a, b); @@ -11328,7 +11333,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_and_epi64() { + const unsafe fn test_mm256_mask_and_epi64() { let a = _mm256_set1_epi64x(1 << 0 | 1 << 15); let b = _mm256_set1_epi64x(1 << 0); let r = _mm256_mask_and_epi64(a, 0, a, b); @@ -11339,7 +11344,7 @@ mod tests { 
} #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_and_epi64() { + const unsafe fn test_mm256_maskz_and_epi64() { let a = _mm256_set1_epi64x(1 << 0 | 1 << 15); let b = _mm256_set1_epi64x(1 << 0); let r = _mm256_maskz_and_epi64(0, a, b); @@ -11350,7 +11355,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_and_epi64() { + const unsafe fn test_mm_mask_and_epi64() { let a = _mm_set1_epi64x(1 << 0 | 1 << 15); let b = _mm_set1_epi64x(1 << 0); let r = _mm_mask_and_epi64(a, 0, a, b); @@ -11361,7 +11366,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_and_epi64() { + const unsafe fn test_mm_maskz_and_epi64() { let a = _mm_set1_epi64x(1 << 0 | 1 << 15); let b = _mm_set1_epi64x(1 << 0); let r = _mm_maskz_and_epi64(0, a, b); @@ -11372,7 +11377,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_and_si512() { + const unsafe fn test_mm512_and_si512() { let a = _mm512_set_epi64(1 << 0 | 1 << 15, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let b = _mm512_set_epi64(1 << 13, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let r = _mm512_and_epi64(a, b); @@ -11381,7 +11386,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_or_epi64() { + const unsafe fn test_mm512_or_epi64() { let a = _mm512_set_epi64(1 << 0 | 1 << 15, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let b = _mm512_set_epi64(1 << 13, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let r = _mm512_or_epi64(a, b); @@ -11394,7 +11399,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_or_epi64() { + const unsafe fn test_mm512_mask_or_epi64() { let a = _mm512_set_epi64(1 << 0 | 1 << 15, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let b = _mm512_set_epi64(1 << 13, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let r = _mm512_mask_or_epi64(a, 0, a, b); @@ -11409,7 +11414,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_or_epi64() { + const unsafe fn test_mm512_maskz_or_epi64() { let a = _mm512_set_epi64(1 << 0 | 1 << 15, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let b = _mm512_set_epi64(1 << 13, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let r = _mm512_maskz_or_epi64(0, a, b); @@ -11420,7 +11425,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_or_epi64() { + const unsafe fn test_mm256_or_epi64() { let a = _mm256_set1_epi64x(1 << 0 | 1 << 15); let b = _mm256_set1_epi64x(1 << 13); let r = _mm256_or_epi64(a, b); @@ -11429,7 +11434,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_or_epi64() { + const unsafe fn test_mm256_mask_or_epi64() { let a = _mm256_set1_epi64x(1 << 0 | 1 << 15); let b = _mm256_set1_epi64x(1 << 13); let r = _mm256_mask_or_epi64(a, 0, a, b); @@ -11440,7 +11445,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_or_epi64() { + const unsafe fn test_mm256_maskz_or_epi64() { let a = _mm256_set1_epi64x(1 << 0 | 1 << 15); let b = _mm256_set1_epi64x(1 << 13); let r = _mm256_maskz_or_epi64(0, a, b); @@ -11451,7 +11456,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_or_epi64() { + const unsafe fn test_mm_or_epi64() { let a = _mm_set1_epi64x(1 << 0 | 1 << 15); let b = _mm_set1_epi64x(1 << 13); let r = _mm_or_epi64(a, b); @@ -11460,7 +11465,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_or_epi64() { + const unsafe fn test_mm_mask_or_epi64() { let a = _mm_set1_epi64x(1 
<< 0 | 1 << 15); let b = _mm_set1_epi64x(1 << 13); let r = _mm_mask_or_epi64(a, 0, a, b); @@ -11471,7 +11476,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_or_epi64() { + const unsafe fn test_mm_maskz_or_epi64() { let a = _mm_set1_epi64x(1 << 0 | 1 << 15); let b = _mm_set1_epi64x(1 << 13); let r = _mm_maskz_or_epi64(0, a, b); @@ -11482,7 +11487,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_or_si512() { + const unsafe fn test_mm512_or_si512() { let a = _mm512_set_epi64(1 << 0 | 1 << 15, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let b = _mm512_set_epi64(1 << 13, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let r = _mm512_or_epi64(a, b); @@ -11495,7 +11500,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_xor_epi64() { + const unsafe fn test_mm512_xor_epi64() { let a = _mm512_set_epi64(1 << 0 | 1 << 15, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let b = _mm512_set_epi64(1 << 13, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let r = _mm512_xor_epi64(a, b); @@ -11504,7 +11509,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_xor_epi64() { + const unsafe fn test_mm512_mask_xor_epi64() { let a = _mm512_set_epi64(1 << 0 | 1 << 15, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let b = _mm512_set_epi64(1 << 13, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let r = _mm512_mask_xor_epi64(a, 0, a, b); @@ -11515,7 +11520,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_xor_epi64() { + const unsafe fn test_mm512_maskz_xor_epi64() { let a = _mm512_set_epi64(1 << 0 | 1 << 15, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let b = _mm512_set_epi64(1 << 13, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let r = _mm512_maskz_xor_epi64(0, a, b); @@ -11526,7 +11531,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_xor_epi64() { + const unsafe fn test_mm256_xor_epi64() { let a = _mm256_set1_epi64x(1 << 0 | 1 << 15); let b = _mm256_set1_epi64x(1 << 13); let r = _mm256_xor_epi64(a, b); @@ -11535,7 +11540,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_xor_epi64() { + const unsafe fn test_mm256_mask_xor_epi64() { let a = _mm256_set1_epi64x(1 << 0 | 1 << 15); let b = _mm256_set1_epi64x(1 << 13); let r = _mm256_mask_xor_epi64(a, 0, a, b); @@ -11546,7 +11551,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_xor_epi64() { + const unsafe fn test_mm256_maskz_xor_epi64() { let a = _mm256_set1_epi64x(1 << 0 | 1 << 15); let b = _mm256_set1_epi64x(1 << 13); let r = _mm256_maskz_xor_epi64(0, a, b); @@ -11557,7 +11562,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_xor_epi64() { + const unsafe fn test_mm_xor_epi64() { let a = _mm_set1_epi64x(1 << 0 | 1 << 15); let b = _mm_set1_epi64x(1 << 13); let r = _mm_xor_epi64(a, b); @@ -11566,7 +11571,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_xor_epi64() { + const unsafe fn test_mm_mask_xor_epi64() { let a = _mm_set1_epi64x(1 << 0 | 1 << 15); let b = _mm_set1_epi64x(1 << 13); let r = _mm_mask_xor_epi64(a, 0, a, b); @@ -11577,7 +11582,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_xor_epi64() { + const unsafe fn test_mm_maskz_xor_epi64() { let a = _mm_set1_epi64x(1 << 0 | 1 << 15); let b = _mm_set1_epi64x(1 << 13); let r = _mm_maskz_xor_epi64(0, a, b); @@ -11588,7 +11593,7 @@ mod tests { } #[simd_test(enable = 
"avx512f")] - unsafe fn test_mm512_xor_si512() { + const unsafe fn test_mm512_xor_si512() { let a = _mm512_set_epi64(1 << 0 | 1 << 15, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let b = _mm512_set_epi64(1 << 13, 0, 0, 0, 0, 0, 0, 1 << 1 | 1 << 2 | 1 << 3); let r = _mm512_xor_epi64(a, b); @@ -11597,7 +11602,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_andnot_epi64() { + const unsafe fn test_mm512_andnot_epi64() { let a = _mm512_set1_epi64(0); let b = _mm512_set1_epi64(1 << 3 | 1 << 4); let r = _mm512_andnot_epi64(a, b); @@ -11606,7 +11611,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_andnot_epi64() { + const unsafe fn test_mm512_mask_andnot_epi64() { let a = _mm512_set1_epi64(1 << 1 | 1 << 2); let b = _mm512_set1_epi64(1 << 3 | 1 << 4); let r = _mm512_mask_andnot_epi64(a, 0, a, b); @@ -11617,7 +11622,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_andnot_epi64() { + const unsafe fn test_mm512_maskz_andnot_epi64() { let a = _mm512_set1_epi64(1 << 1 | 1 << 2); let b = _mm512_set1_epi64(1 << 3 | 1 << 4); let r = _mm512_maskz_andnot_epi64(0, a, b); @@ -11632,7 +11637,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_andnot_epi64() { + const unsafe fn test_mm256_mask_andnot_epi64() { let a = _mm256_set1_epi64x(1 << 1 | 1 << 2); let b = _mm256_set1_epi64x(1 << 3 | 1 << 4); let r = _mm256_mask_andnot_epi64(a, 0, a, b); @@ -11643,7 +11648,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_andnot_epi64() { + const unsafe fn test_mm256_maskz_andnot_epi64() { let a = _mm256_set1_epi64x(1 << 1 | 1 << 2); let b = _mm256_set1_epi64x(1 << 3 | 1 << 4); let r = _mm256_maskz_andnot_epi64(0, a, b); @@ -11654,7 +11659,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_andnot_epi64() { + const unsafe fn test_mm_mask_andnot_epi64() { let a = _mm_set1_epi64x(1 << 1 | 1 << 2); let b = _mm_set1_epi64x(1 << 3 | 1 << 4); let r = _mm_mask_andnot_epi64(a, 0, a, b); @@ -11665,7 +11670,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_andnot_epi64() { + const unsafe fn test_mm_maskz_andnot_epi64() { let a = _mm_set1_epi64x(1 << 1 | 1 << 2); let b = _mm_set1_epi64x(1 << 3 | 1 << 4); let r = _mm_maskz_andnot_epi64(0, a, b); @@ -11676,7 +11681,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_andnot_si512() { + const unsafe fn test_mm512_andnot_si512() { let a = _mm512_set1_epi64(0); let b = _mm512_set1_epi64(1 << 3 | 1 << 4); let r = _mm512_andnot_si512(a, b); @@ -11685,84 +11690,84 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_add_epi64() { + const unsafe fn test_mm512_reduce_add_epi64() { let a = _mm512_set1_epi64(1); let e: i64 = _mm512_reduce_add_epi64(a); assert_eq!(8, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_add_epi64() { + const unsafe fn test_mm512_mask_reduce_add_epi64() { let a = _mm512_set1_epi64(1); let e: i64 = _mm512_mask_reduce_add_epi64(0b11110000, a); assert_eq!(4, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_add_pd() { + const unsafe fn test_mm512_reduce_add_pd() { let a = _mm512_set1_pd(1.); let e: f64 = _mm512_reduce_add_pd(a); assert_eq!(8., e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_add_pd() { + const unsafe fn test_mm512_mask_reduce_add_pd() { let a = _mm512_set1_pd(1.); let e: f64 = 
_mm512_mask_reduce_add_pd(0b11110000, a); assert_eq!(4., e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_mul_epi64() { + const unsafe fn test_mm512_reduce_mul_epi64() { let a = _mm512_set1_epi64(2); let e: i64 = _mm512_reduce_mul_epi64(a); assert_eq!(256, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_mul_epi64() { + const unsafe fn test_mm512_mask_reduce_mul_epi64() { let a = _mm512_set1_epi64(2); let e: i64 = _mm512_mask_reduce_mul_epi64(0b11110000, a); assert_eq!(16, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_mul_pd() { + const unsafe fn test_mm512_reduce_mul_pd() { let a = _mm512_set1_pd(2.); let e: f64 = _mm512_reduce_mul_pd(a); assert_eq!(256., e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_mul_pd() { + const unsafe fn test_mm512_mask_reduce_mul_pd() { let a = _mm512_set1_pd(2.); let e: f64 = _mm512_mask_reduce_mul_pd(0b11110000, a); assert_eq!(16., e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_max_epi64() { + const unsafe fn test_mm512_reduce_max_epi64() { let a = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7); let e: i64 = _mm512_reduce_max_epi64(a); assert_eq!(7, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_max_epi64() { + const unsafe fn test_mm512_mask_reduce_max_epi64() { let a = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7); let e: i64 = _mm512_mask_reduce_max_epi64(0b11110000, a); assert_eq!(3, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_max_epu64() { + const unsafe fn test_mm512_reduce_max_epu64() { let a = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7); let e: u64 = _mm512_reduce_max_epu64(a); assert_eq!(7, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_max_epu64() { + const unsafe fn test_mm512_mask_reduce_max_epu64() { let a = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7); let e: u64 = _mm512_mask_reduce_max_epu64(0b11110000, a); assert_eq!(3, e); @@ -11783,28 +11788,28 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_min_epi64() { + const unsafe fn test_mm512_reduce_min_epi64() { let a = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7); let e: i64 = _mm512_reduce_min_epi64(a); assert_eq!(0, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_min_epi64() { + const unsafe fn test_mm512_mask_reduce_min_epi64() { let a = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7); let e: i64 = _mm512_mask_reduce_min_epi64(0b11110000, a); assert_eq!(0, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_min_epu64() { + const unsafe fn test_mm512_reduce_min_epu64() { let a = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7); let e: u64 = _mm512_reduce_min_epu64(a); assert_eq!(0, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_min_epu64() { + const unsafe fn test_mm512_mask_reduce_min_epu64() { let a = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7); let e: u64 = _mm512_mask_reduce_min_epu64(0b11110000, a); assert_eq!(0, e); @@ -11825,35 +11830,35 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_and_epi64() { + const unsafe fn test_mm512_reduce_and_epi64() { let a = _mm512_set_epi64(1, 1, 1, 1, 2, 2, 2, 2); let e: i64 = _mm512_reduce_and_epi64(a); assert_eq!(0, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_and_epi64() { + const unsafe fn test_mm512_mask_reduce_and_epi64() { let a = _mm512_set_epi64(1, 1, 1, 1, 2, 2, 2, 2); let e: i64 = _mm512_mask_reduce_and_epi64(0b11110000, a); 
assert_eq!(1, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_reduce_or_epi64() { + const unsafe fn test_mm512_reduce_or_epi64() { let a = _mm512_set_epi64(1, 1, 1, 1, 2, 2, 2, 2); let e: i64 = _mm512_reduce_or_epi64(a); assert_eq!(3, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_reduce_or_epi64() { + const unsafe fn test_mm512_mask_reduce_or_epi64() { let a = _mm512_set_epi64(1, 1, 1, 1, 2, 2, 2, 2); let e: i64 = _mm512_mask_reduce_or_epi64(0b11110000, a); assert_eq!(1, e); } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_extractf64x4_pd() { + const unsafe fn test_mm512_extractf64x4_pd() { let a = _mm512_setr_pd(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm512_extractf64x4_pd::<1>(a); let e = _mm256_setr_pd(5., 6., 7., 8.); @@ -11861,7 +11866,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_extractf64x4_pd() { + const unsafe fn test_mm512_mask_extractf64x4_pd() { let a = _mm512_setr_pd(1., 2., 3., 4., 5., 6., 7., 8.); let src = _mm256_set1_pd(100.); let r = _mm512_mask_extractf64x4_pd::<1>(src, 0, a); @@ -11872,7 +11877,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_extractf64x4_pd() { + const unsafe fn test_mm512_maskz_extractf64x4_pd() { let a = _mm512_setr_pd(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm512_maskz_extractf64x4_pd::<1>(0, a); assert_eq_m256d(r, _mm256_setzero_pd()); @@ -11882,7 +11887,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_extracti64x4_epi64() { + const unsafe fn test_mm512_extracti64x4_epi64() { let a = _mm512_setr_epi64(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm512_extracti64x4_epi64::<0x1>(a); let e = _mm256_setr_epi64x(5, 6, 7, 8); @@ -11890,7 +11895,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_extracti64x4_epi64() { + const unsafe fn test_mm512_mask_extracti64x4_epi64() { let a = _mm512_setr_epi64(1, 2, 3, 4, 5, 6, 7, 8); let src = _mm256_set1_epi64x(100); let r = _mm512_mask_extracti64x4_epi64::<0x1>(src, 0, a); @@ -11901,7 +11906,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_extracti64x4_epi64() { + const unsafe fn test_mm512_maskz_extracti64x4_epi64() { let a = _mm512_setr_epi64(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm512_maskz_extracti64x4_epi64::<0x1>(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -12163,7 +12168,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_loadu_epi64() { + const unsafe fn test_mm512_loadu_epi64() { let a = &[4, 3, 2, 5, -8, -9, -64, -50]; let p = a.as_ptr(); let r = _mm512_loadu_epi64(black_box(p)); @@ -12172,7 +12177,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_loadu_epi64() { + const unsafe fn test_mm256_loadu_epi64() { let a = &[4, 3, 2, 5]; let p = a.as_ptr(); let r = _mm256_loadu_epi64(black_box(p)); @@ -12181,7 +12186,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_loadu_epi64() { + const unsafe fn test_mm_loadu_epi64() { let a = &[4, 3]; let p = a.as_ptr(); let r = _mm_loadu_epi64(black_box(p)); @@ -12472,7 +12477,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_storeu_epi64() { + const unsafe fn test_mm512_storeu_epi64() { let a = _mm512_set1_epi64(9); let mut r = _mm512_set1_epi64(0); _mm512_storeu_epi64(&mut r as *mut _ as *mut i64, a); @@ -12480,7 +12485,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_storeu_epi64() { + const unsafe fn test_mm256_storeu_epi64() { 
let a = _mm256_set1_epi64x(9); let mut r = _mm256_set1_epi64x(0); _mm256_storeu_epi64(&mut r as *mut _ as *mut i64, a); @@ -12488,7 +12493,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_storeu_epi64() { + const unsafe fn test_mm_storeu_epi64() { let a = _mm_set1_epi64x(9); let mut r = _mm_set1_epi64x(0); _mm_storeu_epi64(&mut r as *mut _ as *mut i64, a); @@ -12496,7 +12501,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_load_epi64() { + const unsafe fn test_mm512_load_epi64() { #[repr(align(64))] struct Align { data: [i64; 8], // 64 bytes @@ -12511,7 +12516,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_load_epi64() { + const unsafe fn test_mm256_load_epi64() { #[repr(align(64))] struct Align { data: [i64; 4], @@ -12524,7 +12529,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_load_epi64() { + const unsafe fn test_mm_load_epi64() { #[repr(align(64))] struct Align { data: [i64; 2], @@ -12537,7 +12542,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_store_epi64() { + const unsafe fn test_mm512_store_epi64() { let a = _mm512_set1_epi64(9); let mut r = _mm512_set1_epi64(0); _mm512_store_epi64(&mut r as *mut _ as *mut i64, a); @@ -12545,7 +12550,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_store_epi64() { + const unsafe fn test_mm256_store_epi64() { let a = _mm256_set1_epi64x(9); let mut r = _mm256_set1_epi64x(0); _mm256_store_epi64(&mut r as *mut _ as *mut i64, a); @@ -12553,7 +12558,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_store_epi64() { + const unsafe fn test_mm_store_epi64() { let a = _mm_set1_epi64x(9); let mut r = _mm_set1_epi64x(0); _mm_store_epi64(&mut r as *mut _ as *mut i64, a); @@ -12561,7 +12566,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_load_pd() { + const unsafe fn test_mm512_load_pd() { #[repr(align(64))] struct Align { data: [f64; 8], // 64 bytes @@ -12576,7 +12581,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_store_pd() { + const unsafe fn test_mm512_store_pd() { let a = _mm512_set1_pd(9.); let mut r = _mm512_undefined_pd(); _mm512_store_pd(&mut r as *mut _ as *mut f64, a); @@ -12584,7 +12589,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_test_epi64_mask() { + const unsafe fn test_mm512_test_epi64_mask() { let a = _mm512_set1_epi64(1 << 0); let b = _mm512_set1_epi64(1 << 0 | 1 << 1); let r = _mm512_test_epi64_mask(a, b); @@ -12593,7 +12598,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_test_epi64_mask() { + const unsafe fn test_mm512_mask_test_epi64_mask() { let a = _mm512_set1_epi64(1 << 0); let b = _mm512_set1_epi64(1 << 0 | 1 << 1); let r = _mm512_mask_test_epi64_mask(0, a, b); @@ -12604,7 +12609,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_test_epi64_mask() { + const unsafe fn test_mm256_test_epi64_mask() { let a = _mm256_set1_epi64x(1 << 0); let b = _mm256_set1_epi64x(1 << 0 | 1 << 1); let r = _mm256_test_epi64_mask(a, b); @@ -12613,7 +12618,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_test_epi64_mask() { + const unsafe fn test_mm256_mask_test_epi64_mask() { let a = _mm256_set1_epi64x(1 << 0); let b = _mm256_set1_epi64x(1 << 0 | 1 << 1); let r = _mm256_mask_test_epi64_mask(0, a, b); @@ -12624,7 +12629,7 @@ mod tests { } #[simd_test(enable = 
"avx512f,avx512vl")] - unsafe fn test_mm_test_epi64_mask() { + const unsafe fn test_mm_test_epi64_mask() { let a = _mm_set1_epi64x(1 << 0); let b = _mm_set1_epi64x(1 << 0 | 1 << 1); let r = _mm_test_epi64_mask(a, b); @@ -12633,7 +12638,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_test_epi64_mask() { + const unsafe fn test_mm_mask_test_epi64_mask() { let a = _mm_set1_epi64x(1 << 0); let b = _mm_set1_epi64x(1 << 0 | 1 << 1); let r = _mm_mask_test_epi64_mask(0, a, b); @@ -12644,7 +12649,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_testn_epi64_mask() { + const unsafe fn test_mm512_testn_epi64_mask() { let a = _mm512_set1_epi64(1 << 0); let b = _mm512_set1_epi64(1 << 0 | 1 << 1); let r = _mm512_testn_epi64_mask(a, b); @@ -12653,7 +12658,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_testn_epi64_mask() { + const unsafe fn test_mm512_mask_testn_epi64_mask() { let a = _mm512_set1_epi64(1 << 0); let b = _mm512_set1_epi64(1 << 1); let r = _mm512_mask_testn_epi64_mask(0, a, b); @@ -12664,7 +12669,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_testn_epi64_mask() { + const unsafe fn test_mm256_testn_epi64_mask() { let a = _mm256_set1_epi64x(1 << 0); let b = _mm256_set1_epi64x(1 << 1); let r = _mm256_testn_epi64_mask(a, b); @@ -12673,7 +12678,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_testn_epi64_mask() { + const unsafe fn test_mm256_mask_testn_epi64_mask() { let a = _mm256_set1_epi64x(1 << 0); let b = _mm256_set1_epi64x(1 << 1); let r = _mm256_mask_testn_epi64_mask(0, a, b); @@ -12684,7 +12689,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_testn_epi64_mask() { + const unsafe fn test_mm_testn_epi64_mask() { let a = _mm_set1_epi64x(1 << 0); let b = _mm_set1_epi64x(1 << 1); let r = _mm_testn_epi64_mask(a, b); @@ -12693,7 +12698,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_testn_epi64_mask() { + const unsafe fn test_mm_mask_testn_epi64_mask() { let a = _mm_set1_epi64x(1 << 0); let b = _mm_set1_epi64x(1 << 1); let r = _mm_mask_testn_epi64_mask(0, a, b); @@ -12704,7 +12709,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_mask_set1_epi64() { + const unsafe fn test_mm512_mask_set1_epi64() { let src = _mm512_set1_epi64(2); let a: i64 = 11; let r = _mm512_mask_set1_epi64(src, 0, a); @@ -12715,7 +12720,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm512_maskz_set1_epi64() { + const unsafe fn test_mm512_maskz_set1_epi64() { let a: i64 = 11; let r = _mm512_maskz_set1_epi64(0, a); assert_eq_m512i(r, _mm512_setzero_si512()); @@ -12725,7 +12730,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_mask_set1_epi64() { + const unsafe fn test_mm256_mask_set1_epi64() { let src = _mm256_set1_epi64x(2); let a: i64 = 11; let r = _mm256_mask_set1_epi64(src, 0, a); @@ -12736,7 +12741,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm256_maskz_set1_epi64() { + const unsafe fn test_mm256_maskz_set1_epi64() { let a: i64 = 11; let r = _mm256_maskz_set1_epi64(0, a); assert_eq_m256i(r, _mm256_setzero_si256()); @@ -12746,7 +12751,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_mask_set1_epi64() { + const unsafe fn test_mm_mask_set1_epi64() { let src = _mm_set1_epi64x(2); let a: i64 = 11; let r = _mm_mask_set1_epi64(src, 0, a); @@ 
-12757,7 +12762,7 @@ mod tests { } #[simd_test(enable = "avx512f,avx512vl")] - unsafe fn test_mm_maskz_set1_epi64() { + const unsafe fn test_mm_maskz_set1_epi64() { let a: i64 = 11; let r = _mm_maskz_set1_epi64(0, a); assert_eq_m128i(r, _mm_setzero_si128()); @@ -12801,7 +12806,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_cvti64_ss() { + const unsafe fn test_mm_cvti64_ss() { let a = _mm_set_ps(0., -0.5, 1., -1.5); let b: i64 = 9; let r = _mm_cvti64_ss(a, b); @@ -12810,7 +12815,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_cvti64_sd() { + const unsafe fn test_mm_cvti64_sd() { let a = _mm_set_pd(1., -1.5); let b: i64 = 9; let r = _mm_cvti64_sd(a, b); @@ -12963,7 +12968,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_cvtu64_ss() { + const unsafe fn test_mm_cvtu64_ss() { let a = _mm_set_ps(0., -0.5, 1., -1.5); let b: u64 = 9; let r = _mm_cvtu64_ss(a, b); @@ -12972,7 +12977,7 @@ mod tests { } #[simd_test(enable = "avx512f")] - unsafe fn test_mm_cvtu64_sd() { + const unsafe fn test_mm_cvtu64_sd() { let a = _mm_set_pd(1., -1.5); let b: u64 = 9; let r = _mm_cvtu64_sd(a, b); diff --git a/crates/core_arch/src/x86_64/bmi.rs b/crates/core_arch/src/x86_64/bmi.rs index 5d204d51ae..ee41d41375 100644 --- a/crates/core_arch/src/x86_64/bmi.rs +++ b/crates/core_arch/src/x86_64/bmi.rs @@ -48,7 +48,8 @@ pub fn _bextr2_u64(a: u64, control: u64) -> u64 { #[target_feature(enable = "bmi1")] #[cfg_attr(test, assert_instr(andn))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _andn_u64(a: u64, b: u64) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _andn_u64(a: u64, b: u64) -> u64 { !a & b } @@ -60,7 +61,8 @@ pub fn _andn_u64(a: u64, b: u64) -> u64 { #[cfg_attr(test, assert_instr(blsi))] #[cfg(not(target_arch = "x86"))] // generates lots of instructions #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blsi_u64(x: u64) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blsi_u64(x: u64) -> u64 { x & x.wrapping_neg() } @@ -72,7 +74,8 @@ pub fn _blsi_u64(x: u64) -> u64 { #[cfg_attr(test, assert_instr(blsmsk))] #[cfg(not(target_arch = "x86"))] // generates lots of instructions #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blsmsk_u64(x: u64) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blsmsk_u64(x: u64) -> u64 { x ^ (x.wrapping_sub(1_u64)) } @@ -86,7 +89,8 @@ pub fn _blsmsk_u64(x: u64) -> u64 { #[cfg_attr(test, assert_instr(blsr))] #[cfg(not(target_arch = "x86"))] // generates lots of instructions #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blsr_u64(x: u64) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blsr_u64(x: u64) -> u64 { x & (x.wrapping_sub(1)) } @@ -99,7 +103,8 @@ pub fn _blsr_u64(x: u64) -> u64 { #[target_feature(enable = "bmi1")] #[cfg_attr(test, assert_instr(tzcnt))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _tzcnt_u64(x: u64) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _tzcnt_u64(x: u64) -> u64 { x.trailing_zeros() as u64 } @@ -112,7 +117,8 @@ pub fn _tzcnt_u64(x: u64) -> u64 { #[target_feature(enable = "bmi1")] #[cfg_attr(test, assert_instr(tzcnt))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_tzcnt_64(x: u64) -> i64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const 
fn _mm_tzcnt_64(x: u64) -> i64 { x.trailing_zeros() as i64 } @@ -123,6 +129,7 @@ unsafe extern "C" { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; use crate::core_arch::{x86::*, x86_64::*}; @@ -134,7 +141,7 @@ mod tests { } #[simd_test(enable = "bmi1")] - unsafe fn test_andn_u64() { + const unsafe fn test_andn_u64() { assert_eq!(_andn_u64(0, 0), 0); assert_eq!(_andn_u64(0, 1), 1); assert_eq!(_andn_u64(1, 0), 0); @@ -157,25 +164,25 @@ mod tests { } #[simd_test(enable = "bmi1")] - unsafe fn test_blsi_u64() { + const unsafe fn test_blsi_u64() { assert_eq!(_blsi_u64(0b1101_0000u64), 0b0001_0000u64); } #[simd_test(enable = "bmi1")] - unsafe fn test_blsmsk_u64() { + const unsafe fn test_blsmsk_u64() { let r = _blsmsk_u64(0b0011_0000u64); assert_eq!(r, 0b0001_1111u64); } #[simd_test(enable = "bmi1")] - unsafe fn test_blsr_u64() { + const unsafe fn test_blsr_u64() { // TODO: test the behavior when the input is `0`. let r = _blsr_u64(0b0011_0000u64); assert_eq!(r, 0b0010_0000u64); } #[simd_test(enable = "bmi1")] - unsafe fn test_tzcnt_u64() { + const unsafe fn test_tzcnt_u64() { assert_eq!(_tzcnt_u64(0b0000_0001u64), 0u64); assert_eq!(_tzcnt_u64(0b0000_0000u64), 64u64); assert_eq!(_tzcnt_u64(0b1001_0000u64), 4u64); diff --git a/crates/core_arch/src/x86_64/bmi2.rs b/crates/core_arch/src/x86_64/bmi2.rs index ea9daf8857..4eb53ed5ea 100644 --- a/crates/core_arch/src/x86_64/bmi2.rs +++ b/crates/core_arch/src/x86_64/bmi2.rs @@ -24,7 +24,8 @@ use stdarch_test::assert_instr; #[target_feature(enable = "bmi2")] #[cfg(not(target_arch = "x86"))] // calls an intrinsic #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mulx_u64(a: u64, b: u64, hi: &mut u64) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mulx_u64(a: u64, b: u64, hi: &mut u64) -> u64 { let result: u128 = (a as u128) * (b as u128); *hi = (result >> 64) as u64; result as u64 @@ -79,6 +80,7 @@ unsafe extern "C" { #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; use crate::core_arch::x86_64::*; @@ -120,7 +122,7 @@ mod tests { #[simd_test(enable = "bmi2")] #[rustfmt::skip] - unsafe fn test_mulx_u64() { +const unsafe fn test_mulx_u64() { let a: u64 = 9_223_372_036_854_775_800; let b: u64 = 100; let mut hi = 0; diff --git a/crates/core_arch/src/x86_64/bswap.rs b/crates/core_arch/src/x86_64/bswap.rs index 4e2d8b96ea..1b1d739a62 100644 --- a/crates/core_arch/src/x86_64/bswap.rs +++ b/crates/core_arch/src/x86_64/bswap.rs @@ -11,16 +11,20 @@ use stdarch_test::assert_instr; #[inline] #[cfg_attr(test, assert_instr(bswap))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _bswap64(x: i64) -> i64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _bswap64(x: i64) -> i64 { x.swap_bytes() } #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; + use stdarch_test::simd_test; + use super::*; - #[test] - fn test_bswap64() { + #[simd_test] + const fn test_bswap64() { assert_eq!(_bswap64(0x0EADBEEFFADECA0E), 0x0ECADEFAEFBEAD0E); assert_eq!(_bswap64(0x0000000000000000), 0x0000000000000000); } diff --git a/crates/core_arch/src/x86_64/sse.rs b/crates/core_arch/src/x86_64/sse.rs index 6bd7ec83ec..e5c1f79153 100644 --- a/crates/core_arch/src/x86_64/sse.rs +++ b/crates/core_arch/src/x86_64/sse.rs @@ -62,13 +62,15 @@ pub fn _mm_cvttss_si64(a: __m128) -> i64 { #[target_feature(enable = "sse")] #[cfg_attr(test, 
assert_instr(cvtsi2ss))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtsi64_ss(a: __m128, b: i64) -> __m128 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtsi64_ss(a: __m128, b: i64) -> __m128 { unsafe { simd_insert!(a, 0, b as f32) } } #[cfg(test)] mod tests { use crate::core_arch::arch::x86_64::*; + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; #[simd_test(enable = "sse")] @@ -123,21 +125,31 @@ mod tests { } #[simd_test(enable = "sse")] - unsafe fn test_mm_cvtsi64_ss() { - let inputs = &[ - (4555i64, 4555.0f32), - (322223333, 322223330.0), - (-432, -432.0), - (-322223333, -322223330.0), - (9223372036854775807, 9.223372e18), - (-9223372036854775808, -9.223372e18), - ]; + const unsafe fn test_mm_cvtsi64_ss() { + let a = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); - for &(x, f) in inputs { - let a = _mm_setr_ps(5.0, 6.0, 7.0, 8.0); - let r = _mm_cvtsi64_ss(a, x); - let e = _mm_setr_ps(f, 6.0, 7.0, 8.0); - assert_eq_m128(e, r); - } + let r = _mm_cvtsi64_ss(a, 4555); + let e = _mm_setr_ps(4555.0, 6.0, 7.0, 8.0); + assert_eq_m128(e, r); + + let r = _mm_cvtsi64_ss(a, 322223333); + let e = _mm_setr_ps(322223333.0, 6.0, 7.0, 8.0); + assert_eq_m128(e, r); + + let r = _mm_cvtsi64_ss(a, -432); + let e = _mm_setr_ps(-432.0, 6.0, 7.0, 8.0); + assert_eq_m128(e, r); + + let r = _mm_cvtsi64_ss(a, -322223333); + let e = _mm_setr_ps(-322223333.0, 6.0, 7.0, 8.0); + assert_eq_m128(e, r); + + let r = _mm_cvtsi64_ss(a, 9223372036854775807); + let e = _mm_setr_ps(9.223372e18, 6.0, 7.0, 8.0); + assert_eq_m128(e, r); + + let r = _mm_cvtsi64_ss(a, -9223372036854775808); + let e = _mm_setr_ps(-9.223372e18, 6.0, 7.0, 8.0); + assert_eq_m128(e, r); } } diff --git a/crates/core_arch/src/x86_64/sse2.rs b/crates/core_arch/src/x86_64/sse2.rs index 0894aa9810..9f350f2368 100644 --- a/crates/core_arch/src/x86_64/sse2.rs +++ b/crates/core_arch/src/x86_64/sse2.rs @@ -95,7 +95,8 @@ pub unsafe fn _mm_stream_si64(mem_addr: *mut i64, a: i64) { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(movq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtsi64_si128(a: i64) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtsi64_si128(a: i64) -> __m128i { _mm_set_epi64x(0, a) } @@ -107,7 +108,8 @@ pub fn _mm_cvtsi64_si128(a: i64) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(movq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtsi64x_si128(a: i64) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtsi64x_si128(a: i64) -> __m128i { _mm_cvtsi64_si128(a) } @@ -118,7 +120,8 @@ pub fn _mm_cvtsi64x_si128(a: i64) -> __m128i { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(movq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtsi128_si64(a: __m128i) -> i64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtsi128_si64(a: __m128i) -> i64 { unsafe { simd_extract!(a.as_i64x2(), 0) } } @@ -129,7 +132,8 @@ pub fn _mm_cvtsi128_si64(a: __m128i) -> i64 { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(movq))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtsi128_si64x(a: __m128i) -> i64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtsi128_si64x(a: __m128i) -> i64 { _mm_cvtsi128_si64(a) } @@ -141,7 +145,8 @@ 
pub fn _mm_cvtsi128_si64x(a: __m128i) -> i64 { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(cvtsi2sd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtsi64_sd(a: __m128d, b: i64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtsi64_sd(a: __m128d, b: i64) -> __m128d { unsafe { simd_insert!(a, 0, b as f64) } } @@ -153,13 +158,15 @@ pub fn _mm_cvtsi64_sd(a: __m128d, b: i64) -> __m128d { #[target_feature(enable = "sse2")] #[cfg_attr(test, assert_instr(cvtsi2sd))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_cvtsi64x_sd(a: __m128d, b: i64) -> __m128d { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_cvtsi64x_sd(a: __m128d, b: i64) -> __m128d { _mm_cvtsi64_sd(a, b) } #[cfg(test)] mod tests { use crate::core_arch::arch::x86_64::*; + use crate::core_arch::assert_eq_const as assert_eq; use std::boxed; use std::ptr; use stdarch_test::simd_test; @@ -206,19 +213,19 @@ mod tests { } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cvtsi64_si128() { + const unsafe fn test_mm_cvtsi64_si128() { let r = _mm_cvtsi64_si128(5); assert_eq_m128i(r, _mm_setr_epi64x(5, 0)); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cvtsi128_si64() { + const unsafe fn test_mm_cvtsi128_si64() { let r = _mm_cvtsi128_si64(_mm_setr_epi64x(5, 0)); assert_eq!(r, 5); } #[simd_test(enable = "sse2")] - unsafe fn test_mm_cvtsi64_sd() { + const unsafe fn test_mm_cvtsi64_sd() { let a = _mm_set1_pd(3.5); let r = _mm_cvtsi64_sd(a, 5); assert_eq_m128d(r, _mm_setr_pd(5.0, 3.5)); diff --git a/crates/core_arch/src/x86_64/sse41.rs b/crates/core_arch/src/x86_64/sse41.rs index 4b7d25f214..3b1c88ad8c 100644 --- a/crates/core_arch/src/x86_64/sse41.rs +++ b/crates/core_arch/src/x86_64/sse41.rs @@ -13,7 +13,8 @@ use stdarch_test::assert_instr; #[cfg_attr(test, assert_instr(pextrq, IMM1 = 1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_extract_epi64(a: __m128i) -> i64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_extract_epi64(a: __m128i) -> i64 { static_assert_uimm_bits!(IMM1, 1); unsafe { simd_extract!(a.as_i64x2(), IMM1 as u32) } } @@ -27,7 +28,8 @@ pub fn _mm_extract_epi64(a: __m128i) -> i64 { #[cfg_attr(test, assert_instr(pinsrq, IMM1 = 0))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _mm_insert_epi64(a: __m128i, i: i64) -> __m128i { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _mm_insert_epi64(a: __m128i, i: i64) -> __m128i { static_assert_uimm_bits!(IMM1, 1); unsafe { transmute(simd_insert!(a.as_i64x2(), IMM1 as u32, i)) } } @@ -35,10 +37,11 @@ pub fn _mm_insert_epi64(a: __m128i, i: i64) -> __m128i { #[cfg(test)] mod tests { use crate::core_arch::arch::x86_64::*; + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_extract_epi64() { + const unsafe fn test_mm_extract_epi64() { let a = _mm_setr_epi64x(0, 1); let r = _mm_extract_epi64::<1>(a); assert_eq!(r, 1); @@ -47,7 +50,7 @@ mod tests { } #[simd_test(enable = "sse4.1")] - unsafe fn test_mm_insert_epi64() { + const unsafe fn test_mm_insert_epi64() { let a = _mm_set1_epi64x(0); let e = _mm_setr_epi64x(0, 32); let r = _mm_insert_epi64::<1>(a, 32); diff --git a/crates/core_arch/src/x86_64/tbm.rs b/crates/core_arch/src/x86_64/tbm.rs index f4bba709f6..e7d4b8d3e4 100644 
--- a/crates/core_arch/src/x86_64/tbm.rs +++ b/crates/core_arch/src/x86_64/tbm.rs @@ -42,7 +42,8 @@ pub fn _bextri_u64(a: u64) -> u64 { #[target_feature(enable = "tbm")] #[cfg_attr(test, assert_instr(blcfill))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blcfill_u64(x: u64) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blcfill_u64(x: u64) -> u64 { x & x.wrapping_add(1) } @@ -53,7 +54,8 @@ pub fn _blcfill_u64(x: u64) -> u64 { #[target_feature(enable = "tbm")] #[cfg_attr(test, assert_instr(blci))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blci_u64(x: u64) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blci_u64(x: u64) -> u64 { x | !x.wrapping_add(1) } @@ -64,7 +66,8 @@ pub fn _blci_u64(x: u64) -> u64 { #[target_feature(enable = "tbm")] #[cfg_attr(test, assert_instr(blcic))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blcic_u64(x: u64) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blcic_u64(x: u64) -> u64 { !x & x.wrapping_add(1) } @@ -76,7 +79,8 @@ pub fn _blcic_u64(x: u64) -> u64 { #[target_feature(enable = "tbm")] #[cfg_attr(test, assert_instr(blcmsk))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blcmsk_u64(x: u64) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blcmsk_u64(x: u64) -> u64 { x ^ x.wrapping_add(1) } @@ -87,7 +91,8 @@ pub fn _blcmsk_u64(x: u64) -> u64 { #[target_feature(enable = "tbm")] #[cfg_attr(test, assert_instr(blcs))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blcs_u64(x: u64) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blcs_u64(x: u64) -> u64 { x | x.wrapping_add(1) } @@ -98,7 +103,8 @@ pub fn _blcs_u64(x: u64) -> u64 { #[target_feature(enable = "tbm")] #[cfg_attr(test, assert_instr(blsfill))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blsfill_u64(x: u64) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blsfill_u64(x: u64) -> u64 { x | x.wrapping_sub(1) } @@ -109,7 +115,8 @@ pub fn _blsfill_u64(x: u64) -> u64 { #[target_feature(enable = "tbm")] #[cfg_attr(test, assert_instr(blsic))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _blsic_u64(x: u64) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _blsic_u64(x: u64) -> u64 { !x | x.wrapping_sub(1) } @@ -121,7 +128,8 @@ pub fn _blsic_u64(x: u64) -> u64 { #[target_feature(enable = "tbm")] #[cfg_attr(test, assert_instr(t1mskc))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _t1mskc_u64(x: u64) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _t1mskc_u64(x: u64) -> u64 { !x | x.wrapping_add(1) } @@ -133,12 +141,14 @@ pub fn _t1mskc_u64(x: u64) -> u64 { #[target_feature(enable = "tbm")] #[cfg_attr(test, assert_instr(tzmsk))] #[stable(feature = "simd_x86", since = "1.27.0")] -pub fn _tzmsk_u64(x: u64) -> u64 { +#[rustc_const_unstable(feature = "stdarch_const_x86", issue = "149298")] +pub const fn _tzmsk_u64(x: u64) -> u64 { !x & x.wrapping_sub(1) } #[cfg(test)] mod tests { + use crate::core_arch::assert_eq_const as assert_eq; use stdarch_test::simd_test; use crate::core_arch::x86_64::*; @@ -149,13 +159,13 @@ mod tests { } #[simd_test(enable = "tbm")] - unsafe fn test_blcfill_u64() { + const unsafe fn 
         assert_eq!(_blcfill_u64(0b0101_0111u64), 0b0101_0000u64);
         assert_eq!(_blcfill_u64(0b1111_1111u64), 0u64);
     }
 
     #[simd_test(enable = "tbm")]
-    unsafe fn test_blci_u64() {
+    const unsafe fn test_blci_u64() {
         assert_eq!(
             _blci_u64(0b0101_0000u64),
             0b1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1110u64
@@ -167,25 +177,25 @@ mod tests {
     }
 
     #[simd_test(enable = "tbm")]
-    unsafe fn test_blcic_u64() {
+    const unsafe fn test_blcic_u64() {
         assert_eq!(_blcic_u64(0b0101_0001u64), 0b0000_0010u64);
         assert_eq!(_blcic_u64(0b1111_1111u64), 0b1_0000_0000u64);
     }
 
     #[simd_test(enable = "tbm")]
-    unsafe fn test_blcmsk_u64() {
+    const unsafe fn test_blcmsk_u64() {
         assert_eq!(_blcmsk_u64(0b0101_0001u64), 0b0000_0011u64);
         assert_eq!(_blcmsk_u64(0b1111_1111u64), 0b1_1111_1111u64);
     }
 
     #[simd_test(enable = "tbm")]
-    unsafe fn test_blcs_u64() {
+    const unsafe fn test_blcs_u64() {
         assert_eq!(_blcs_u64(0b0101_0001u64), 0b0101_0011u64);
         assert_eq!(_blcs_u64(0b1111_1111u64), 0b1_1111_1111u64);
     }
 
     #[simd_test(enable = "tbm")]
-    unsafe fn test_blsfill_u64() {
+    const unsafe fn test_blsfill_u64() {
         assert_eq!(_blsfill_u64(0b0101_0100u64), 0b0101_0111u64);
         assert_eq!(
             _blsfill_u64(0u64),
@@ -194,7 +204,7 @@ mod tests {
     }
 
     #[simd_test(enable = "tbm")]
-    unsafe fn test_blsic_u64() {
+    const unsafe fn test_blsic_u64() {
         assert_eq!(
             _blsic_u64(0b0101_0100u64),
             0b1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1011u64
@@ -206,7 +216,7 @@ mod tests {
     }
 
     #[simd_test(enable = "tbm")]
-    unsafe fn test_t1mskc_u64() {
+    const unsafe fn test_t1mskc_u64() {
         assert_eq!(
             _t1mskc_u64(0b0101_0111u64),
             0b1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1000u64
@@ -218,7 +228,7 @@ mod tests {
     }
 
     #[simd_test(enable = "tbm")]
-    unsafe fn test_tzmsk_u64() {
+    const unsafe fn test_tzmsk_u64() {
         assert_eq!(_tzmsk_u64(0b0101_1000u64), 0b0000_0111u64);
         assert_eq!(_tzmsk_u64(0b0101_1001u64), 0b0000_0000u64);
     }
diff --git a/crates/simd-test-macro/src/lib.rs b/crates/simd-test-macro/src/lib.rs
index b18e2d6b63..b8bb874480 100644
--- a/crates/simd-test-macro/src/lib.rs
+++ b/crates/simd-test-macro/src/lib.rs
@@ -7,43 +7,41 @@
 #[macro_use]
 extern crate quote;
 
-use proc_macro2::{Ident, Literal, Span, TokenStream, TokenTree};
+use proc_macro2::{Ident, Span, TokenStream, TokenTree};
 use quote::ToTokens;
 use std::env;
 
-fn string(s: &str) -> TokenTree {
-    Literal::string(s).into()
-}
-
 #[proc_macro_attribute]
 pub fn simd_test(
     attr: proc_macro::TokenStream,
     item: proc_macro::TokenStream,
 ) -> proc_macro::TokenStream {
     let tokens = TokenStream::from(attr).into_iter().collect::<Vec<_>>();
-    if tokens.len() != 3 {
-        panic!("expected #[simd_test(enable = \"feature\")]");
-    }
-    match &tokens[0] {
-        TokenTree::Ident(tt) if *tt == "enable" => {}
-        _ => panic!("expected #[simd_test(enable = \"feature\")]"),
-    }
-    match &tokens[1] {
-        TokenTree::Punct(tt) if tt.as_char() == '=' => {}
-        _ => panic!("expected #[simd_test(enable = \"feature\")]"),
-    }
-    let enable_feature = match &tokens[2] {
-        TokenTree::Literal(tt) => tt.to_string(),
-        _ => panic!("expected #[simd_test(enable = \"feature\")]"),
+    let (target_features, target_feature_attr) = match &tokens[..] {
+        [] => (Vec::new(), TokenStream::new()),
+        [
+            TokenTree::Ident(enable),
+            TokenTree::Punct(equals),
+            TokenTree::Literal(literal),
+        ] if enable == "enable" && equals.as_char() == '=' => {
+            let enable_feature = literal.to_string();
+            let enable_feature = enable_feature.trim_start_matches('"').trim_end_matches('"');
+            let target_features: Vec<_> = enable_feature
+                .replace('+', "")
+                .split(',')
+                .map(String::from)
+                .collect();
+
+            (
+                target_features,
+                quote! {
+                    #[target_feature(enable = #enable_feature)]
+                },
+            )
+        }
+        _ => panic!("expected #[simd_test(enable = \"feature\")] or #[simd_test]"),
     };
-    let enable_feature = enable_feature.trim_start_matches('"').trim_end_matches('"');
-    let target_features: Vec<String> = enable_feature
-        .replace('+', "")
-        .split(',')
-        .map(String::from)
-        .collect();
-    let enable_feature = string(enable_feature);
     let mut item = syn::parse_macro_input!(item as syn::ItemFn);
     let item_attrs = std::mem::take(&mut item.attrs);
     let name = &item.sig.ident;
@@ -102,6 +100,19 @@ pub fn simd_test(
         TokenStream::new()
     };
 
+    let (const_test, const_stability) = if item.sig.constness.is_some() {
+        (
+            quote! {
+                const _: () = unsafe { #name() };
+            },
+            quote! {
+                #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")]
+            },
+        )
+    } else {
+        (TokenStream::new(), TokenStream::new())
+    };
+
     let ret: TokenStream = quote_spanned! {
         proc_macro2::Span::call_site() =>
         #[allow(non_snake_case)]
@@ -109,6 +120,8 @@ pub fn simd_test(
         #maybe_ignore
         #(#item_attrs)*
         fn #name() {
+            #const_test
+
             let mut missing_features = ::std::vec::Vec::new();
             #detect_missing_features
             if missing_features.is_empty() {
@@ -118,7 +131,8 @@
                 ::stdarch_test::assert_skip_test_ok(stringify!(#name), &missing_features);
             }
 
-            #[target_feature(enable = #enable_feature)]
+            #target_feature_attr
+            #const_stability
             #item
         }
     };
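
Illustrative sketch (not part of the patch; the test name is hypothetical and plain integer arithmetic stands in for the intrinsics): the macro change above emits an extra `const _` item whenever the annotated test is a `const fn`, so the same assertions are checked at compile time in addition to the normal run-time `#[test]` path. A minimal, self-contained version of that pattern looks like this:

// Hypothetical stand-in for a `const unsafe fn` intrinsic test.
const unsafe fn my_const_test() {
    // Same shape as the `_blcfill_u64` test above, spelled with plain integer ops.
    assert!((0b0101_0111u64 & 0b0101_0111u64.wrapping_add(1)) == 0b0101_0000u64);
}

// The extra item emitted for `const` tests: if an assertion in the test body fails,
// evaluating this constant fails the build.
const _: () = unsafe { my_const_test() };

fn main() {
    // The generated #[test] wrapper still exercises the same body at run time.
    unsafe { my_const_test() }
}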